This article collects typical usage examples of the daemon.DaemonContext.open method in Python. If you are unsure what DaemonContext.open does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also read more about the containing class, daemon.DaemonContext.
Below are 15 code examples of DaemonContext.open, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python samples.
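Before the examples, here is a minimal, self-contained sketch of the pattern they all build on. It assumes only the python-daemon package; the file paths are placeholders.
# Minimal sketch: daemonize the current process with DaemonContext.open().
# Assumes the python-daemon package; paths below are placeholders.
import logging

from daemon import DaemonContext
from daemon.pidfile import PIDLockFile

logging.basicConfig(filename='/tmp/mydaemon.log', level=logging.INFO)
log_stream = logging.root.handlers[0].stream

context = DaemonContext(
    pidfile=PIDLockFile('/tmp/mydaemon.pid'),
    files_preserve=[log_stream],  # keep the log fd open across detachment
)
context.open()  # fork, detach, close other fds, write the PID file
logging.info('Now running as a daemon.')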
Example 1: daemonise
# Required import: from daemon import DaemonContext [as alias]
# Or: from daemon.DaemonContext import open [as alias]
def daemonise():
    from daemon import DaemonContext
    from pidfile import PidFile
    daemon = DaemonContext(pidfile=PidFile("/var/run/doorbot.pid"))
    daemon.open()
    logging.info('Daemonised doorbot')
Example 2: open
# Required import: from daemon import DaemonContext [as alias]
# Or: from daemon.DaemonContext import open [as alias]
def open(self):
    # Call super
    DaemonContext.open(self)
    # Needfuls.doit()
    self.logger.info('Initializing...')
    # RPC connection
    self.connection = Connection(self.config['rpchost'])
    self.logger.info('Loading plugins...')
    # Import and create plugin objects
    self.plugins = [
        plugin(self.connection, self.config, self.logger.handler)
        for plugin in [
            getattr(
                __import__(
                    'rpcdaemon.plugins.' + module.lower(),
                    fromlist=[module]
                ),
                module)
            for module in self.config['plugins'].split()
        ]
    ]
    # Setup worker with plugins and crank it up
    self.logger.info('Starting worker...')
    self.worker = Worker(self.connection, self.plugins,
                         handler=self.logger.handler)
    self.worker.daemon = True  # Daemon thread
    self.worker.start()
    self.logger.info('Started.')
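The nested __import__/getattr chain above is a compact dynamic plugin loader. A sketch of the same idea written with importlib may be easier to follow; the module and class names mirror the example, while the helper function itself is hypothetical.
# Sketch: the example's plugin loading, restated with importlib.
from importlib import import_module

def load_plugins(names, connection, config, handler):
    plugins = []
    for name in names:
        # e.g. name 'Foo' maps to module rpcdaemon.plugins.foo, class Foo
        module = import_module('rpcdaemon.plugins.' + name.lower())
        plugin_cls = getattr(module, name)
        plugins.append(plugin_cls(connection, config, handler))
    return plugins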
Example 3: transfer
# Required import: from daemon import DaemonContext [as alias]
# Or: from daemon.DaemonContext import open [as alias]
def transfer(app, transfer_job_id):
    transfer_job = app.get_transfer_job(transfer_job_id)
    if transfer_job is None:
        log.error('Invalid transfer job ID: %s' % transfer_job_id)
        return False
    port_range = app.config.get('app:main', 'transfer_worker_port_range')
    try:
        port_range = [int(p) for p in port_range.split('-')]
    except Exception as e:
        log.error('Invalid port range set in transfer_worker_port_range: %s: %s' % (port_range, str(e)))
        return False
    protocol = transfer_job.params['protocol']
    if protocol not in ('http', 'https', 'scp'):
        log.error('Unsupported protocol: %s' % protocol)
        return False
    state_result = StateResult(result=dict(state=transfer_job.states.RUNNING, info='Transfer process starting up.'))
    listener_server = ListenerServer(range(port_range[0], port_range[1] + 1), ListenerRequestHandler, app, transfer_job, state_result)
    # daemonize here (if desired)
    if not debug:
        daemon_context = DaemonContext(files_preserve=[listener_server.fileno()], working_directory=os.getcwd())
        daemon_context.open()
        # If this fails, it'll never be detected. Hopefully it won't fail since it succeeded once.
        app.connect_database()  # daemon closed the database fd
        transfer_job = app.get_transfer_job(transfer_job_id)
    listener_thread = threading.Thread(target=listener_server.serve_forever)
    listener_thread.setDaemon(True)
    listener_thread.start()
    # Store this process' pid so unhandled deaths can be handled by the restarter
    transfer_job.pid = os.getpid()
    app.sa_session.add(transfer_job)
    app.sa_session.flush()
    terminal_state = None
    if protocol in ['http', 'https']:
        for transfer_result_dict in http_transfer(transfer_job):
            state_result.result = transfer_result_dict
            if transfer_result_dict['state'] in transfer_job.terminal_states:
                terminal_state = transfer_result_dict
    elif protocol in ['scp']:
        # Transfer the file using scp
        transfer_result_dict = scp_transfer(transfer_job)
        # Handle the state of the transfer
        state = transfer_result_dict['state']
        state_result.result = transfer_result_dict
        if state in transfer_job.terminal_states:
            terminal_state = transfer_result_dict
    if terminal_state is not None:
        transfer_job.state = terminal_state['state']
        for name in ['info', 'path']:
            if name in terminal_state:
                transfer_job.__setattr__(name, terminal_state[name])
    else:
        transfer_job.state = transfer_job.states.ERROR
        transfer_job.info = 'Unknown error encountered by transfer worker.'
    app.sa_session.add(transfer_job)
    app.sa_session.flush()
    return True
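The essential trick in this example is listing the server socket's descriptor in files_preserve, since DaemonContext.open() otherwise closes every open file when it detaches. A condensed, hypothetical sketch of just that pattern:
# Sketch: keep a listening socket alive across daemonization.
import os
import socketserver
import threading

from daemon import DaemonContext

class PingHandler(socketserver.BaseRequestHandler):
    def handle(self):
        self.request.sendall(b'pong\n')

server = socketserver.TCPServer(('127.0.0.1', 0), PingHandler)
context = DaemonContext(
    files_preserve=[server.fileno()],  # exempt the socket from fd cleanup
    working_directory=os.getcwd(),
)
context.open()
# Any other descriptors (e.g. database connections) were closed by open()
# and must be re-established here, as the example above does.
threading.Thread(target=server.serve_forever, daemon=True).start()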
Example 4: open
# Required import: from daemon import DaemonContext [as alias]
# Or: from daemon.DaemonContext import open [as alias]
def open(self):
    self.files_preserve = \
        list(tuple(self.files_preserve) + tuple(logger.handler.stream for logger in self.loggers))
    _DaemonContext.open(self)
    gevent.reinit()
    ## not reliable w/ gevent, unfortunately .. use stderr redirect instead
    #log = logging.getLogger('UNHANDLED')
    #sys.excepthook = lambda tp, value, tb: \
    #    log.error(''.join(traceback.format_exception(tp, value, tb)))
    gevent.signal(signal.SIGTERM, self.run_exit_hooks, signal.SIGTERM, None)
Example 5: start
# Required import: from daemon import DaemonContext [as alias]
# Or: from daemon.DaemonContext import open [as alias]
def start(self, detachProcess=True):
    pidFile = TimeoutPIDLockFile(self._pidFile)
    context = DaemonContext(
        working_directory=self._runDir,
        umask=0o002,
        pidfile=pidFile,
        detach_process=detachProcess,
    )
    context.signal_map = {
        signal.SIGTERM: 'terminate',
        signal.SIGHUP: 'terminate',
        signal.SIGUSR1: 'terminate',
    }
    if self._isRunningAndBreak(pidFile):
        raise AlreadyRunning("PID file locked and process not stale")
    self._context = context
    try:
        context.open()
        self._setupLogging()
    except:
        if self.logger is None:
            self._setupLogging()
        self.logger.warn("Exception while entering context", exc_info=True)
        try:
            context.close()
        except:
            pass
        return
    try:
        self.run()
    except Exception as e:
        self.logger.error("Exception in run()", exc_info=e)
    finally:
        self.logger.debug("Shutting down daemon")
        self.shutdown()
        try:
            self._fHandler.close()
        except:
            pass
        try:
            context.close()
        except:
            pass
Example 6: start_server
# Required import: from daemon import DaemonContext [as alias]
# Or: from daemon.DaemonContext import open [as alias]
def start_server(config):
    weblogger = logging.getLogger('plight_httpd')
    weblogger.setLevel(config['web_log_level'])
    if weblogger.handlers == []:
        weblogging_handler = RotatingFileHandler(config['web_log_file'],
                                                 mode='a',
                                                 maxBytes=config['web_log_filesize'],
                                                 backupCount=config['web_log_rotation_count'])
        weblogger.addHandler(weblogging_handler)
    applogger = logging.getLogger('plight')
    applogger.setLevel(config['log_level'])
    if applogger.handlers == []:
        applogging_handler = RotatingFileHandler(config['log_file'],
                                                 mode='a',
                                                 maxBytes=config['log_filesize'],
                                                 backupCount=config['log_rotation_count'])
        applogger.addHandler(applogging_handler)
    pidfile = PIDLockFile(PID_FILE)
    context = DaemonContext(pidfile=pidfile,
                            uid=pwd.getpwnam(config['user']).pw_uid,
                            gid=grp.getgrnam(config['group']).gr_gid,
                            files_preserve=[
                                weblogging_handler.stream,
                                applogging_handler.stream,
                            ],)
    context.stdout = applogging_handler.stream
    context.stderr = applogging_handler.stream
    context.open()
    os.umask(0o022)
    try:
        try:
            log_message('Plight is starting...')
            node_status = plight.NodeStatus(config['state_file'])
            server_class = BaseHTTPServer.HTTPServer
            http = server_class((config['host'],
                                 config['port']),
                                plight.StatusHTTPRequestHandler)
            http.serve_forever()
        except SystemExit as sysexit:
            log_message("Stopping... " + str(sysexit))
    except Exception as ex:
        log_message("ERROR: " + str(ex))
Example 7: run_as_daemon
# Required import: from daemon import DaemonContext [as alias]
# Or: from daemon.DaemonContext import open [as alias]
def run_as_daemon(ssm_inst):
    """
    Given an SSM object, start it as a daemon process.
    """
    log.info("The SSM will run as a daemon.")
    # We need to preserve the file descriptor for any log files.
    log_files = [x.stream for x in log.handlers]
    dc = DaemonContext(files_preserve=log_files)
    try:
        # Note: because we need to be compatible with python 2.4, we can't use
        #     with dc:
        # here - we need to call the open() and close() methods
        # manually.
        dc.open()
        try:
            ssm_inst.startup()
        except Exception as err:
            print(err)
            print(type(err))
            print(dir(err))
            log.info("SSM failed to start: " + str(err))
            raise
        # Only an exception will break this loop.
        # A SystemExit exception will be raised if the process is killed.
        while True:
            if ssm_inst.is_dead():
                raise ssm_inst.get_death_exception()
            # Process all the messages one at a time before continuing
            try:
                while ssm_inst.process_outgoing():
                    pass
            except ssm.SsmException as err:
                # SsmException if the message is rejected by the consumer.
                # We can wait and try again.
                log.error('Error in message processing: ' + str(err))
            except EncryptException as err:
                # EncryptException if something went wrong trying to encrypt
                # or sign. Give up.
                log.error("Failed to encrypt or sign:" + str(err))
                raise
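The Python 2.4 note above explains the manual open()/close() pairing. On current python-daemon releases, DaemonContext is a context manager, so the same setup can be written as in this sketch (log and ssm_inst as in the example above):
# Sketch: context-manager form; entering the block calls open(),
# leaving it calls close(). Assumes a current python-daemon release.
from daemon import DaemonContext

log_files = [handler.stream for handler in log.handlers]
with DaemonContext(files_preserve=log_files):
    ssm_inst.startup()
    # ... message loop as above ...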
Example 8: main
# Required import: from daemon import DaemonContext [as alias]
# Or: from daemon.DaemonContext import open [as alias]
#......... some code omitted here .........
    # Twisted ticket: http://twistedmatrix.com/trac/ticket/3868
    # -mario
    try:
        from twisted.python import lockfile
    except ImportError:
        from orbited import __path__ as orbited_path
        sys.path.append(os.path.join(orbited_path[0], "hotfixes", "win32api"))
        from twisted.python import lockfile
        lockfile.kill = None
    #################
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option(
        "-c",
        "--config",
        dest="config",
        default=None,
        help="path to configuration file"
    )
    parser.add_option(
        "-v",
        "--version",
        dest="version",
        action="store_true",
        default=False,
        help="print Orbited version"
    )
    parser.add_option(
        "-p",
        "--profile",
        dest="profile",
        action="store_true",
        default=False,
        help="run Orbited with a profiler"
    )
    parser.add_option(
        "-q",
        "--quickstart",
        dest="quickstart",
        action="store_true",
        default=False,
        help="run Orbited on port 8000 and MorbidQ on port 61613"
    )
    parser.add_option(
        "-d",
        "--daemon",
        dest="daemon",
        action="store_true",
        default=False,
        help="run Orbited as a daemon (requires the python-daemon package)"
    )
    parser.add_option(
        "--pid-file",
        dest="pidfile",
        default="/var/run/orbited/orbited.pid",
        help="use PIDFILE as the orbited daemon pid file"
             "; defaults to '/var/run/orbited/orbited.pid'",
    )
    MemoryUtil.add_options_to_parser(parser)
    (options, args) = parser.parse_args()
    if args:
        print('the "orbited" command does not accept positional arguments. type "orbited -h" for options.')
        sys.exit(1)
    if options.version:
        print("Orbited version: %s" % (version,))
        sys.exit(0)
    global logger
    if options.quickstart:
        logging.basicConfig()
        logger = logging.getLogger(__name__)
        config.map['[listen]'].append('http://:8000')
        config.map['[listen]'].append('stomp://:61613')
        config.map['[access]'][('localhost', 61613)] = ['*']
        logger.info("Quickstarting Orbited")
    else:
        # load configuration from configuration
        # file and from command line arguments.
        config.setup(options=options)
        logging.config.fileConfig(options.config)
        logger = logging.getLogger(__name__)
        logger.info("Starting Orbited with config file %s" % options.config)
    if options.daemon:
        try:
            from daemon import DaemonContext
            from daemon.pidfile import PIDLockFile
            pidlock = PIDLockFile(options.pidfile)
            daemon = DaemonContext(pidfile=pidlock)
            logger.debug('daemonizing with pid file %r', options.pidfile)
            daemon.open()
            logger.debug('daemonized!')
        except Exception as exc:
            logger.debug(exc)
Example 9: run_worker
# Required import: from daemon import DaemonContext [as alias]
# Or: from daemon.DaemonContext import open [as alias]
def run_worker(concurrency=DAEMON_CONCURRENCY, detach=False,
               loglevel=DAEMON_LOG_LEVEL, logfile=DAEMON_LOG_FILE, discard=False,
               pidfile=DAEMON_PID_FILE, umask=0, uid=None, gid=None,
               supervised=False, working_directory=None, chroot=None,
               statistics=None, **kwargs):
    """Starts the celery worker server."""
    # Set SIGCLD back to the default SIG_DFL (before python-daemon overrode
    # it) so the parent can wait() for the terminated child process; this
    # stops the 'OSError: [Errno 10] No child processes' problem.
    if hasattr(signal, "SIGCLD"):  # Make sure the platform supports signals.
        signal.signal(signal.SIGCLD, signal.SIG_DFL)
    print("Celery %s is starting." % __version__)
    if statistics is not None:
        settings.CELERY_STATISTICS = statistics
    if not concurrency:
        concurrency = multiprocessing.cpu_count()
    if conf.CELERY_BACKEND == "database" \
            and settings.DATABASE_ENGINE == "sqlite3" \
            and concurrency > 1:
        import warnings
        warnings.warn("The sqlite3 database engine doesn't support "
                      "concurrency. We'll be using a single process only.",
                      UserWarning)
        concurrency = 1
    # Setup logging
    if not isinstance(loglevel, int):
        loglevel = LOG_LEVELS[loglevel.upper()]
    if not detach:
        logfile = None  # log to stderr when not running in the background.
    if discard:
        discarded_count = discard_all()
        what = discarded_count > 1 and "messages" or "message"
        print("discard: Erased %d %s from the queue.\n" % (
            discarded_count, what))
    # Dump configuration to screen so we have some basic information
    # when users send e-mails.
    print(STARTUP_INFO_FMT % {
        "vhost": getattr(settings, "AMQP_VHOST", "(default)"),
        "host": getattr(settings, "AMQP_SERVER", "(default)"),
        "port": getattr(settings, "AMQP_PORT", "(default)"),
        "exchange": conf.AMQP_EXCHANGE,
        "exchange_type": conf.AMQP_EXCHANGE_TYPE,
        "consumer_queue": conf.AMQP_CONSUMER_QUEUE,
        "consumer_rkey": conf.AMQP_CONSUMER_ROUTING_KEY,
        "publisher_rkey": conf.AMQP_PUBLISHER_ROUTING_KEY,
        "concurrency": concurrency,
        "loglevel": loglevel,
        "pidfile": pidfile,
        "statistics": settings.CELERY_STATISTICS and "ON" or "OFF",
    })
    print("Celery has started.")
    if detach:
        if not CAN_DETACH:
            raise RuntimeError(
                "This operating system doesn't support detach. ")
        from daemon import DaemonContext
        from celery.log import setup_logger, redirect_stdouts_to_logger
        # Since without stderr any errors will be silently suppressed,
        # we need to know that we have access to the logfile
        if logfile:
            open(logfile, "a").close()
        pidlock = acquire_pidlock(pidfile)
        if umask is None:
            umask = 0
        if uid is None:
            uid = os.geteuid()
        if gid is None:
            gid = os.getegid()
        working_directory = working_directory or os.getcwd()
        context = DaemonContext(chroot_directory=chroot,
                                working_directory=working_directory,
                                umask=umask,
                                pidfile=pidlock,
                                uid=uid,
                                gid=gid)
        context.open()
        logger = setup_logger(loglevel, logfile)
        redirect_stdouts_to_logger(logger, loglevel)
    # Run the worker init handler.
    # (Usually imports task modules and such.)
    current_loader.on_worker_init()

    def run_worker():
        worker = WorkController(concurrency=concurrency,
                                loglevel=loglevel,
                                logfile=logfile,
                                is_detached=detach)
#......... some code omitted here .........
Example 10: DaemonRunner
# Required import: from daemon import DaemonContext [as alias]
# Or: from daemon.DaemonContext import open [as alias]
class DaemonRunner(object):
    """ Controller for a callable running in a separate background process.

    The first command-line argument is the action to take:

    * 'start': Become a daemon and call `app.run()`.
    * 'stop': Close the daemon context.
    * 'restart': Stop, then start.
    """

    start_message = "started with pid %(pid)d"

    def __init__(self, app):
        """ Set up the parameters of a new runner.

        The `app` argument must have the following attributes:

        * `stdin_path`, `stdout_path`, `stderr_path`: Filesystem
          paths to open and replace the existing `sys.stdin`,
          `sys.stdout`, `sys.stderr`.

        * `pidfile_path`: Filesystem path to a file that will be
          used as the PID file for the daemon.

        * `run`: Callable that will be invoked when the daemon is
          started.
        """
        self.parse_args()
        self.app = app
        self.daemon_context = DaemonContext()
        self.daemon_context.stdin = open(app.stdin_path, 'r')
        self.daemon_context.stdout = open(app.stdout_path, 'w+')
        self.daemon_context.stderr = open(
            app.stderr_path, 'w+', buffering=0)
        self.pidfile = make_pidlockfile(app.pidfile_path)
        self.daemon_context.pidfile = self.pidfile

    def _usage_exit(self, argv):
        """ Emit a usage message, then exit.
        """
        progname = os.path.basename(argv[0])
        usage_exit_code = 2
        action_usage = "|".join(self.action_funcs.keys())
        sys.stderr.write(
            "usage: %(progname)s %(action_usage)s\n" % vars())
        sys.exit(usage_exit_code)

    def parse_args(self, argv=None):
        """ Parse command-line arguments.
        """
        if argv is None:
            argv = sys.argv
        min_args = 2
        if len(argv) < min_args:
            self._usage_exit(argv)
        self.action = argv[1]
        if self.action not in self.action_funcs:
            self._usage_exit(argv)

    def _start(self):
        """ Open the daemon context and run the application.
        """
        if self.pidfile.is_locked():
            pidfile_path = self.pidfile.path
            if pidfile_lock_is_stale(self.pidfile):
                self.pidfile.break_lock()
            else:
                error = SystemExit(
                    "PID file %(pidfile_path)r already locked"
                    % vars())
                raise error
        self.daemon_context.open()
        pid = os.getpid()
        message = self.start_message % vars()
        sys.stderr.write("%(message)s\n" % vars())
        sys.stderr.flush()
        self.app.run()

    def _stop(self):
        """ Close the daemon context.
        """
        if not self.pidfile.is_locked():
            pidfile_path = self.pidfile.path
            error = SystemExit(
                "PID file %(pidfile_path)r not locked"
                % vars())
            raise error
        if pidfile_lock_is_stale(self.pidfile):
            self.pidfile.break_lock()
        else:
            pid = self.pidfile.read_pid()
#......... some code omitted here .........
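A runner like this is driven by a small application object carrying the attributes listed in the docstring. A hypothetical conforming app could look like the sketch below (the dispatch on self.action lives in the omitted code):
# Sketch: a minimal app object satisfying DaemonRunner's interface.
# Paths are placeholders; EchoApp is hypothetical.
import time

class EchoApp(object):
    stdin_path = '/dev/null'
    stdout_path = '/tmp/echo.out'
    stderr_path = '/tmp/echo.err'
    pidfile_path = '/tmp/echo.pid'

    def run(self):
        while True:
            print('still alive')  # redirected to stdout_path after detaching
            time.sleep(60)

# DaemonRunner(EchoApp()) reads the action from sys.argv[1]; the omitted
# code dispatches it to _start()/_stop() via action_funcs.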
Example 11: docopt
# Required import: from daemon import DaemonContext [as alias]
# Or: from daemon.DaemonContext import open [as alias]
        logger.error(traceback.format_exc())
        try:
            agent.stop()
            zwave_network.stop()
        except Exception:
            pass
        logger.debug('Restarting in 3 sec...')
        time.sleep(3)

if __name__ == '__main__':
    arguments = docopt(__doc__)
    if arguments['--version']:
        print('Version: {}'.format(VERSION))
        exit(0)
    if arguments['run']:
        if arguments['--daemon']:
            from daemon import pidfile, DaemonContext
            pid = pidfile.TimeoutPIDLockFile('/var/run/zeyeagent.pid', 10)
            ctx = DaemonContext(working_directory='.', pidfile=pid)
            ctx.open()
        start_agent(
            device=arguments['--device'],
            config_path=arguments['--config'],
            user_path=arguments['--user_path'],
            debug=arguments['--debug'] or arguments['--full-debug'],
            pyagentx_debug=arguments['--full-debug'])
Example 12: Runner
# Required import: from daemon import DaemonContext [as alias]
# Or: from daemon.DaemonContext import open [as alias]
class Runner(object):
    """ Controller for a callable running in a separate background process.

    The first command-line argument is the action to take:

    * 'start': Become a daemon and call `app.run()`.
    * 'stop': Exit the daemon process specified in the PID file.
    * 'restart': Stop, then start.
    * 'status': Show the status of the process.
    """

    start_message = "started with pid %d"
    status_message_running = "process is running (%d)"
    status_message_not_running = "process is not running"

    def __init__(self):
        """ Set up the parameters of a new runner.

        The `app` argument must have the following attributes:

        * `stdin_path`, `stdout_path`, `stderr_path`: Filesystem
          paths to open and replace the existing `sys.stdin`,
          `sys.stdout`, `sys.stderr`.

        * `pidfile_path`: Absolute filesystem path to a file that
          will be used as the PID file for the daemon. If
          ``None``, no PID file will be used.

        * `pidfile_timeout`: Used as the default acquisition
          timeout value supplied to the runner's PID lock file.
        """
        self.options = {}
        self.pidfile_timeout = 6.0
        self.stdout_path = None
        self.stderr_path = None
        self.pidfile_path = None
        self.userid = None
        self.args = None
        self.parse_args()
        self.pidfile = None
        self.pidfile_path = os.path.join(self.options['pid_dir'], self.options['service'] + ".pid")
        self.pidfile = make_pidlockfile(
            self.pidfile_path, self.pidfile_timeout)
        if self.pidfile.is_locked() and not is_pidfile_stale(self.pidfile) \
                and self.action == 'start':
            print("Process already running. Exiting.")
            sys.exit(1)
        if (not self.pidfile.is_locked() or is_pidfile_stale(self.pidfile)) \
                and (self.action == 'stop' or self.action == 'kill'):
            print("Process not running. Exiting.")
            sys.exit(1)

    def app_run(self):
        """
        The running process of the application
        """
        raise RunnerInvalidActionError("Action: %(action)r is not implemented" % vars(self))

    def app_shutdown(self):
        """
        The shutdown process of the application
        """
        raise RunnerInvalidActionError("Action: %(action)r is not implemented" % vars(self))

    def _usage_exit(self, args):
        """ Emit a usage message, then exit.
        """
        usage_exit_code = 2
        message = "usage: use --help to get help"
        emit_message(message)
        sys.exit(usage_exit_code)

    def parse_args(self):
        """ Parse command-line arguments.
        """
        args = jnt_parse_args()
        self.options = vars(args)
        self.action = args.command
        self.args = args
        self.stdout_path = os.path.join(self.options['log_dir'], self.options['service'] + "_out.log")
        self.stderr_path = os.path.join(self.options['log_dir'], self.options['service'] + "_err.log")
        self.pidfile_path = os.path.join(self.options['pid_dir'], self.options['service'] + ".pid")
        if self.options['user'] and self.options['user'] != "":
            self.userid = pwd.getpwnam(self.options['user']).pw_uid
            if self.userid != os.getuid():
                #print self.userid
                os.setuid(self.userid)
        try:
            os.makedirs(self.options['pid_dir'])
            os.makedirs(self.options['home_dir'])
            os.makedirs(self.options['conf_dir'])
            os.makedirs(self.options['log_dir'])
        except OSError as exc:  # Python >2.5
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise
        if self.action not in self.action_funcs:
            self._usage_exit(args)
#......... some code omitted here .........
Example 13: LoggingDaemonRunner
# Required import: from daemon import DaemonContext [as alias]
# Or: from daemon.DaemonContext import open [as alias]
class LoggingDaemonRunner(object):
    """ Controller for a callable running in a separate background process.

    The first command-line argument is the action to take:

    * 'start': Become a daemon and call `app.run()`.
    * 'stop': Exit the daemon process specified in the PID file.
    * 'restart': Stop, then start.
    """

    start_message = "started with pid %(pid)d"

    def _addLoggerFiles(self):
        "adds all files related to loggers_preserve to files_preserve"
        for logger in [self.stdout_logger, self.stderr_logger]:
            if logger:
                self.loggers_preserve.append(logger)
        loggerFiles = openFilesFromLoggers(self.loggers_preserve)
        self.daemon_context.files_preserve.extend(loggerFiles)

    def __init__(self, app, parser):
        """ Set up the parameters of a new runner.

        The `app` argument must have the following attributes:

        * `stdin_path`: Filesystem path to open and replace `sys.stdin`.

        * `stdout_logger`, `stderr_logger`: Loggers to redirect
          the existing `sys.stdout` and `sys.stderr`.

        * `pidfile_path`: Absolute filesystem path to a file that
          will be used as the PID file for the daemon. If
          ``None``, no PID file will be used.

        * `pidfile_timeout`: Used as the default acquisition
          timeout value supplied to the runner's PID lock file.

        * `run`: Callable that will be invoked when the daemon is
          started.

        The `parser` argument must be an optparse.OptionParser() object.
        """
        self.app = app
        self.parser = parser
        (options, args) = parser.parse_args()
        self.args = args
        self.options = options
        self.parse_args()
        self.daemon_context = DaemonContext()
        self.daemon_context.stdin = open(app.stdin_path, 'r')
        self.stdout_logger = app.stdout_logger
        self.stderr_logger = app.stderr_logger
        self.daemon_context.files_preserve = []
        self.loggers_preserve = []
        self.pidfile = None
        if app.pidfile_path is not None:
            self.pidfile = make_pidlockfile(
                app.pidfile_path, app.pidfile_timeout)
        self.daemon_context.pidfile = self.pidfile

    def _usage_exit(self, usage_exit_code):
        """ Emit a usage message, then exit.
        """
        emit_message(self.parser.print_usage())
        sys.exit(usage_exit_code)

    def open(self):
        self._addLoggerFiles()
        self.daemon_context.open()
        if self.stdout_logger:
            fileLikeObj = FileLikeLogger(self.stdout_logger)
            sys.stdout = fileLikeObj
        if self.stderr_logger:
            fileLikeObj = FileLikeLogger(self.stderr_logger)
            sys.stderr = fileLikeObj

    def parse_args(self):
        """ Parse command-line arguments.
        """
        self.action = self.args[0]
        if self.action not in self.action_funcs:
            self._usage_exit(1)

    def _start(self):
        """ Open the daemon context and run the application.
        """
        if is_pidfile_stale(self.pidfile):
            self.pidfile.break_lock()
        try:
            self.open()
        except daemon.pidlockfile.AlreadyLocked:
            pidfile_path = self.pidfile.path
            raise DaemonRunnerStartFailureError(
                "PID file %(pidfile_path)r already locked" % vars())
        pid = os.getpid()
#......... some code omitted here .........
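FileLikeLogger is referenced but not defined in this excerpt. A minimal sketch of such an adapter (the project's real class may differ) routes sys.stdout writes into a logger:
# Sketch: a file-like wrapper so print() output reaches a logging.Logger.
import logging

class FileLikeLogger(object):
    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, text):
        text = text.rstrip()
        if text:  # skip the bare newlines that print() emits
            self.logger.log(self.level, text)

    def flush(self):
        pass  # nothing is buffered locally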
Example 14: run_worker
# Required import: from daemon import DaemonContext [as alias]
# Or: from daemon.DaemonContext import open [as alias]
def run_worker(concurrency=DAEMON_CONCURRENCY, detach=False,
               loglevel=DAEMON_LOG_LEVEL, logfile=DAEMON_LOG_FILE, discard=False,
               pidfile=DAEMON_PID_FILE, umask=0, uid=None, gid=None,
               supervised=False, working_directory=None, chroot=None,
               statistics=None, **kwargs):
    """Starts the celery worker server."""
    print("Celery %s is starting." % __version__)
    if statistics:
        settings.CELERY_STATISTICS = statistics
    if not concurrency:
        concurrency = multiprocessing.cpu_count()
    if settings.DATABASE_ENGINE == "sqlite3" and concurrency > 1:
        import warnings
        warnings.warn("The sqlite3 database engine doesn't support "
                      "concurrency. We'll be using a single process only.",
                      UserWarning)
        concurrency = 1
    # Setup logging
    if not isinstance(loglevel, int):
        loglevel = LOG_LEVELS[loglevel.upper()]
    if not detach:
        logfile = None  # log to stderr when not running in the background.
    if discard:
        discarded_count = discard_all()
        what = discarded_count > 1 and "messages" or "message"
        print("discard: Erased %d %s from the queue.\n" % (
            discarded_count, what))
    # Dump configuration to screen so we have some basic information
    # when users send e-mails.
    print(STARTUP_INFO_FMT % {
        "vhost": settings.AMQP_VHOST,
        "host": settings.AMQP_SERVER,
        "port": settings.AMQP_PORT,
        "exchange": conf.AMQP_EXCHANGE,
        "exchange_type": conf.AMQP_EXCHANGE_TYPE,
        "consumer_queue": conf.AMQP_CONSUMER_QUEUE,
        "consumer_rkey": conf.AMQP_CONSUMER_ROUTING_KEY,
        "publisher_rkey": conf.AMQP_PUBLISHER_ROUTING_KEY,
        "concurrency": concurrency,
        "loglevel": loglevel,
        "pidfile": pidfile,
        "statistics": settings.CELERY_STATISTICS and "ON" or "OFF",
    })
    print("Celery has started.")
    if detach:
        if not CAN_DETACH:
            raise RuntimeError(
                "This operating system doesn't support detach. ")
        from daemon import DaemonContext
        # Since without stderr any errors will be silently suppressed,
        # we need to know that we have access to the logfile
        if logfile:
            open(logfile, "a").close()
        pidlock = acquire_pidlock(pidfile)
        if not umask:
            umask = 0
        uid = uid and int(uid) or os.geteuid()
        gid = gid and int(gid) or os.getegid()
        working_directory = working_directory or os.getcwd()
        context = DaemonContext(chroot_directory=chroot,
                                working_directory=working_directory,
                                umask=umask,
                                pidfile=pidlock,
                                uid=uid,
                                gid=gid)
        context.open()
    discovery.autodiscover()

    def run_worker():
        worker = WorkController(concurrency=concurrency,
                                loglevel=loglevel,
                                logfile=logfile,
                                is_detached=detach)
        try:
            worker.start()
        except Exception as e:
            emergency_error(logfile, "celeryd raised exception %s: %s\n%s" % (
                e.__class__, e, traceback.format_exc()))
Example 15: get_dns
# Required import: from daemon import DaemonContext [as alias]
# Or: from daemon.DaemonContext import open [as alias]
        log.info('Fetching valid DNs.')
        dns = get_dns(options.dn_file)
        ssm.set_dns(dns)
    except Exception as e:
        log.fatal('Failed to initialise SSM: %s', e)
        log.info(LOG_BREAK)
        sys.exit(1)
    try:
        # Note: because we need to be compatible with python 2.4, we can't use
        #     with dc:
        # here - we need to call the open() and close() methods
        # manually.
        dc.open()
        ssm.startup()
        i = 0
        # The message listening loop.
        while True:
            time.sleep(1)
            if i % REFRESH_DNS == 0:
                log.info('Refreshing the valid DNs.')
                dns = get_dns(options.dn_file)
                ssm.set_dns(dns)
            try:
                log.info('Sending ping.')
                ssm.send_ping()