This article collects typical code examples of the Python method freenas.dispatcher.client.Client.enable_server. If you are unsure what Client.enable_server does, how to call it, or what it looks like in real code, the selected examples below should help. You can also explore further usage examples of the class freenas.dispatcher.client.Client to which this method belongs.
The following presents 14 code examples of the Client.enable_server method, ordered by popularity by default.
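Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: one Client enables its server side with enable_server(), registers a service object, and connects over a transport, while a second Client connects to the other end and invokes the service with call_sync(). This is only an illustrative sketch distilled from Example 1 below; the HelloService class is a placeholder written for this sketch, and the assumption that services derive from freenas.dispatcher.rpc.RpcService mirrors what the test code appears to use rather than a verified reference.

import socket
from freenas.dispatcher.client import Client
from freenas.dispatcher.rpc import RpcService  # assumed base class for exposed services

class HelloService(RpcService):
    # hypothetical service, written only for this sketch
    def hello(self, name):
        return 'Hello World, {0}'.format(name)

a, b = socket.socketpair()

server = Client()
server.standalone_server = True          # serve RPC calls without a dispatcher behind this client
server.enable_server()                   # turn on the server side of this client
server.register_service('test', HelloService())
server.connect('fd://{0}'.format(a.fileno()))

caller = Client()
caller.connect('fd://{0}'.format(b.fileno()))
print(caller.call_sync('test.hello', 'freenas'))   # -> 'Hello World, freenas'

caller.disconnect()
server.disconnect()
a.close()
b.close()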
Example 1: test_back_to_back
# Required import: from freenas.dispatcher.client import Client [as alias]
# Or: from freenas.dispatcher.client.Client import enable_server [as alias]
def test_back_to_back(self):
    a, b = socket.socketpair()
    self.assertGreaterEqual(a.fileno(), 0)
    self.assertGreaterEqual(b.fileno(), 0)

    c1 = Client()
    c1.standalone_server = True
    c1.enable_server()
    c1.register_service('test', TestService())
    c1.connect('fd://{0}'.format(a.fileno()))
    self.assertTrue(c1.connected)

    c2 = Client()
    c2.connect('fd://{0}'.format(b.fileno()))
    self.assertTrue(c2.connected)
    self.assertEqual(c2.call_sync('test.hello', 'freenas'), 'Hello World, freenas')

    c2.disconnect()
    a.close()
    c1.disconnect()
    b.close()
Example 2: setup_back_to_back
# Required import: from freenas.dispatcher.client import Client [as alias]
# Or: from freenas.dispatcher.client.Client import enable_server [as alias]
def setup_back_to_back(self, streaming=False):
    a, b = socket.socketpair()
    self.assertGreaterEqual(a.fileno(), 0)
    self.assertGreaterEqual(b.fileno(), 0)

    c1 = Client()
    c1._s = a
    c1.enable_server()
    c1.standalone_server = True
    if streaming:
        c1.streaming = True
        c1.rpc.streaming_enabled = True

    c1.register_service('test', TestService())
    c1.connect('fd://{0}'.format(a.fileno()))
    self.assertTrue(c1.connected)

    c2 = Client()
    c2._s = b
    c2.streaming = True
    c2.connect('fd://{0}'.format(b.fileno()))
    self.assertTrue(c2.connected)
    return c1, c2
Example 3: Main
# Required import: from freenas.dispatcher.client import Client [as alias]
# Or: from freenas.dispatcher.client.Client import enable_server [as alias]
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('clid')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.config = None
        self.logger = logging.getLogger()
        self.plugin_dirs = []
        self.ml = None
        self.context = None

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_cli(self):
        self.logger.info('Initializing CLI instance')
        self.context = Context()
        self.context.connection = self.client
        self.context.plugin_dirs = PLUGIN_DIRS
        self.context.discover_plugins()
        self.context.start_entity_subscribers()
        self.context.login_plugins()
        self.ml = MainLoop(self.context)
        self.logger.info('CLI instance ready')

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('clid')
                self.client.enable_server()
                self.client.call_sync('management.enable_features', ['streaming_responses'])
                self.client.register_service('clid.management', ManagementService(self))
                self.client.register_service('clid.eval', EvalService(self))
                self.client.register_service('clid.debug', DebugService())
                self.client.resume_service('clid.management')
                self.client.resume_service('clid.eval')
                self.client.resume_service('clid.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c

        configure_logging('/var/log/clid.log', 'DEBUG')
        setproctitle('clid')
        self.init_dispatcher()
        self.init_cli()
        self.client.wait_forever()
Example 4: Context
# Required import: from freenas.dispatcher.client import Client [as alias]
# Or: from freenas.dispatcher.client.Client import enable_server [as alias]
class Context(object):
    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.scheduler = None
        self.active_tasks = {}

    def init_datastore(self):
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        store = MongoDBJobStore(database='freenas', collection='calendar_tasks', client=self.datastore.client)
        self.scheduler = BackgroundScheduler(jobstores={'default': store}, timezone=pytz.utc)
        self.scheduler.start()

    def register_schemas(self):
        self.client.register_schema('calendar-task', {
            'type': 'object',
            'additionalProperties': False,
            'properties': {
                'id': {'type': 'string'},
                'name': {'type': 'string'},
                'args': {'type': 'array'},
                'description': {'type': 'string'},
                'enabled': {'type': 'boolean'},
                'hidden': {'type': 'boolean'},
                'protected': {'type': 'boolean'},
                'status': {'$ref': 'calendar-task-status'},
                'schedule': {
                    'type': 'object',
                    'additionalProperties': False,
                    'properties': {
                        'coalesce': {'type': ['boolean', 'integer', 'null']},
                        'year': {'type': ['string', 'integer', 'null']},
                        'month': {'type': ['string', 'integer', 'null']},
                        'day': {'type': ['string', 'integer', 'null']},
                        'week': {'type': ['string', 'integer', 'null']},
                        'day_of_week': {'type': ['string', 'integer', 'null']},
                        'hour': {'type': ['string', 'integer', 'null']},
                        'minute': {'type': ['string', 'integer', 'null']},
                        'second': {'type': ['string', 'integer', 'null']},
                        'timezone': {'type': ['string', 'null']}
                    }
                }
            }
        })

        self.client.register_schema('calendar-task-status', {
            'type': 'object',
            'properties': {
                'next_run_time': {'type': 'string'},
                'last_run_status': {'type': 'string'},
                'current_run_status': {'type': ['string', 'null']},
                'current_run_progress': {'type': ['object', 'null']}
            }
        })

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        tid = self.client.submit_task(*args)
        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync('alerts.emit', {
                    'name': 'scheduler.task.failed',
#.........remaining code omitted.........
Example 5: Context
# Required import: from freenas.dispatcher.client import Client [as alias]
# Or: from freenas.dispatcher.client.Client import enable_server [as alias]
class Context(object):
    def __init__(self):
        self.service = TaskProxyService(self)
        self.task = queue.Queue(1)
        self.datastore = None
        self.configstore = None
        self.conn = None
        self.instance = None
        self.running = Event()

    def put_status(self, state, result=None, exception=None):
        obj = {
            'status': state,
            'result': None
        }

        if result is not None:
            obj['result'] = result

        if exception is not None:
            obj['error'] = serialize_error(exception)

        self.conn.call_sync('task.put_status', obj)

    def task_progress_handler(self, args):
        if self.instance:
            self.instance.task_progress_handler(args)

    def collect_fds(self, obj):
        if isinstance(obj, dict):
            for v in obj.values():
                if isinstance(v, FileDescriptor):
                    yield v
                else:
                    yield from self.collect_fds(v)

        if isinstance(obj, (list, tuple)):
            for o in obj:
                if isinstance(o, FileDescriptor):
                    yield o
                else:
                    yield from self.collect_fds(o)

    def close_fds(self, fds):
        for i in fds:
            try:
                os.close(i.fd)
            except OSError:
                pass

    def main(self):
        if len(sys.argv) != 2:
            print("Invalid number of arguments", file=sys.stderr)
            sys.exit(errno.EINVAL)

        key = sys.argv[1]
        configure_logging(None, logging.DEBUG)

        self.datastore = get_datastore()
        self.configstore = ConfigStore(self.datastore)
        self.conn = Client()
        self.conn.connect('unix:')
        self.conn.login_service('task.{0}'.format(os.getpid()))
        self.conn.enable_server()
        self.conn.rpc.register_service_instance('taskproxy', self.service)
        self.conn.register_event_handler('task.progress', self.task_progress_handler)
        self.conn.call_sync('task.checkin', key)
        setproctitle.setproctitle('task executor (idle)')

        while True:
            try:
                task = self.task.get()
                logging.root.setLevel(self.conn.call_sync('management.get_logging_level'))
                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

                if task['debugger']:
                    sys.path.append('/usr/local/lib/dispatcher/pydev')
                    import pydevd
                    host, port = task['debugger']
                    pydevd.settrace(host, port=port, stdoutToServer=True, stderrToServer=True)

                name, _ = os.path.splitext(os.path.basename(task['filename']))
                module = load_module_from_file(name, task['filename'])
                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

                fds = list(self.collect_fds(task['args']))

                try:
                    self.instance = getattr(module, task['class'])(DispatcherWrapper(self.conn), self.datastore)
                    self.instance.configstore = self.configstore
                    self.instance.user = task['user']
                    self.instance.environment = task['environment']
                    self.running.set()
                    result = self.instance.run(*task['args'])
                except BaseException as err:
                    print("Task exception: {0}".format(str(err)), file=sys.stderr)
                    traceback.print_exc(file=sys.stderr)

                    if hasattr(self.instance, 'rollback'):
#.........remaining code omitted.........
Example 6: Main
# Required import: from freenas.dispatcher.client import Client [as alias]
# Or: from freenas.dispatcher.client.Client import enable_server [as alias]
#.........preceding code omitted.........
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.start(address)
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def parse_config(self, filename):
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            self.logger.error('Cannot read config file: %s', err.message)
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['dscached']['plugin-dirs']

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('dscached')
                self.client.enable_server(self.rpc)
                self.client.resume_service('dscached.account')
                self.client.resume_service('dscached.group')
                self.client.resume_service('dscached.host')
                self.client.resume_service('dscached.management')
                self.client.resume_service('dscached.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        self.plugins[name] = cls
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_schema(self, name, schema):
Example 7: Main
# Required import: from freenas.dispatcher.client import Client [as alias]
# Or: from freenas.dispatcher.client.Client import enable_server [as alias]
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('etcd')
        self.root = None
        self.configfile = None
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.renderers = {}
        self.managed_files = {}

    def init_datastore(self):
        try:
            self.datastore = datastore.get_datastore(self.configfile)
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('etcd')
                self.client.enable_server()
                self.client.register_service('etcd.generation', FileGenerationService(self))
                self.client.register_service('etcd.management', ManagementService(self))
                self.client.register_service('etcd.debug', DebugService())
                self.client.resume_service('etcd.generation')
                self.client.resume_service('etcd.management')
                self.client.resume_service('etcd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_renderers(self):
        for name, impl in TEMPLATE_RENDERERS.items():
            self.renderers[name] = impl(self)

    def parse_config(self, filename):
        try:
            f = open(filename, 'r')
            self.config = json.load(f)
            f.close()
        except IOError as err:
            self.logger.error('Cannot read config file: %s', err.message)
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['etcd']['plugin-dirs']

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        self.logger.debug('Scanning plugin directory %s', dir)
        for root, dirs, files in os.walk(dir):
            for name in files:
                abspath = os.path.join(root, name)
                path = os.path.relpath(abspath, dir)
                name, ext = os.path.splitext(path)

                if name in self.managed_files.keys():
                    continue

                if ext in TEMPLATE_RENDERERS.keys():
                    self.managed_files[name] = abspath
                    self.logger.info('Adding managed file %s [%s]', name, ext)

    def generate_file(self, file_path):
        if file_path not in self.managed_files.keys():
            raise RpcException(errno.ENOENT, 'No such file')

        template_path = self.managed_files[file_path]
        name, ext = os.path.splitext(template_path)
        if ext not in self.renderers.keys():
            raise RuntimeError("Can't find renderer for {0}".format(file_path))

        renderer = self.renderers[ext]
        try:
            return renderer.render_template(template_path)
        except Exception as e:
            self.logger.warn('Cannot generate file {0}: {1}'.format(file_path, str(e)))
#.........remaining code omitted.........
Example 8: Main
# Required import: from freenas.dispatcher.client import Client [as alias]
# Or: from freenas.dispatcher.client.Client import enable_server [as alias]
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('alertd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.emitters = {}

    def init_datastore(self):
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_reminder(self):
        t = threading.Thread(target=self.reminder_thread)
        t.daemon = True
        t.start()

    def parse_config(self, filename):
        try:
            f = open(filename, 'r')
            self.config = json.load(f)
            f.close()
        except IOError as err:
            self.logger.error('Cannot read config file: %s', err.message)
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['alertd']['plugin-dirs']

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('alertd')
                self.client.enable_server()
                self.client.register_service('alertd.management', ManagementService(self))
                self.client.register_service('alertd.alert', AlertService(self))
                self.client.register_service('alertd.debug', DebugService())
                self.client.resume_service('alertd.management')
                self.client.resume_service('alertd.alert')
                self.client.resume_service('alertd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def emit_alert(self, alert):
        self.logger.debug('Emitting alert <id:{0}> (class {1})'.format(alert['id'], alert['class']))
        for i in self.datastore.query('alert.filters'):
            for predicate in i.get('predicates', []):
                if predicate['operator'] not in operators_table:
                    continue

                if not operators_table[predicate['operator']](alert[predicate['property']], predicate['value']):
                    break
            else:
                try:
                    emitter = self.emitters.get(i['emitter'])
                    if not emitter:
                        self.logger.warning('Invalid emitter {0} for alert filter {1}'.format(i['emitter'], i['id']))
                        continue

                    self.logger.debug('Alert <id:{0}> matched filter {1}'.format(alert['id'], i['id']))
                    if alert['send_count'] > 0:
#.........remaining code omitted.........
Example 9: Context
# Required import: from freenas.dispatcher.client import Client [as alias]
# Or: from freenas.dispatcher.client.Client import enable_server [as alias]
#.........preceding code omitted.........
                if ev.filter == select.KQ_FILTER_PROC:
                    job = self.job_by_pid(ev.ident)
                    if job:
                        job.pid_event(ev)
                        continue

                    if ev.fflags & select.KQ_NOTE_CHILD:
                        if ev.fflags & select.KQ_NOTE_EXIT:
                            continue

                        pjob = self.job_by_pid(ev.data)
                        if not pjob:
                            self.untrack_pid(ev.ident)
                            continue

                        # Stop tracking at session ID boundary
                        try:
                            if pjob.pgid != os.getpgid(ev.ident):
                                self.untrack_pid(ev.ident)
                                continue
                        except ProcessLookupError:
                            continue

                        with self.lock:
                            job = Job(self)
                            job.load_anonymous(pjob, ev.ident)
                            self.jobs[job.id] = job

                        self.logger.info('Added job {0}'.format(job.label))

    def track_pid(self, pid):
        ev = select.kevent(
            pid,
            select.KQ_FILTER_PROC,
            select.KQ_EV_ADD | select.KQ_EV_ENABLE,
            select.KQ_NOTE_EXIT | select.KQ_NOTE_EXEC | select.KQ_NOTE_FORK | select.KQ_NOTE_TRACK,
            0, 0
        )

        self.kq.control([ev], 0)

    def untrack_pid(self, pid):
        ev = select.kevent(
            pid,
            select.KQ_FILTER_PROC,
            select.KQ_EV_DELETE,
            0, 0, 0
        )

        with contextlib.suppress(FileNotFoundError):
            self.kq.control([ev], 0)

    def emit_event(self, name, args):
        self.server.broadcast_event(name, args)
        if self.client and self.client.connected:
            self.client.emit_event(name, args)

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('serviced')
                self.client.enable_server(self.rpc)
                self.client.resume_service('serviced.job')
                self.client.resume_service('serviced.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def bootstrap(self):
        def doit():
            with self.lock:
                job = Job(self)
                job.load({
                    'Label': 'org.freenas.serviced.bootstrap',
                    'ProgramArguments': BOOTSTRAP_JOB,
                    'OneShot': True,
                    'RunAtLoad': True,
                })
                self.jobs[job.id] = job

        Thread(target=doit).start()

    def shutdown(self):
        self.client.disconnect()
        self.server.close()
        sys.exit(0)

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()

        configure_logging('/var/log/serviced.log', 'DEBUG', file=True)
        bsd.setproctitle('serviced')
        self.logger.info('Started')
        self.init_server(args.s)
        self.bootstrap()
        self.event_loop()
Example 10: Context
# Required import: from freenas.dispatcher.client import Client [as alias]
# Or: from freenas.dispatcher.client.Client import enable_server [as alias]
class Context(object):
    def __init__(self):
        self.service = TaskProxyService(self)
        self.task = queue.Queue(1)
        self.datastore = None
        self.configstore = None
        self.conn = None
        self.instance = None
        self.running = Event()

    def put_status(self, state, result=None, exception=None):
        obj = {
            'status': state,
            'result': None
        }

        if result is not None:
            obj['result'] = result

        if exception is not None:
            obj['error'] = serialize_error(exception)

        self.conn.call_sync('task.put_status', obj)

    def main(self):
        if len(sys.argv) != 2:
            print("Invalid number of arguments", file=sys.stderr)
            sys.exit(errno.EINVAL)

        key = sys.argv[1]
        logging.basicConfig(level=logging.DEBUG)

        self.datastore = get_datastore()
        self.configstore = ConfigStore(self.datastore)
        self.conn = Client()
        self.conn.connect('unix:')
        self.conn.login_service('task.{0}'.format(os.getpid()))
        self.conn.enable_server()
        self.conn.rpc.register_service_instance('taskproxy', self.service)
        self.conn.call_sync('task.checkin', key)
        setproctitle.setproctitle('task executor (idle)')

        while True:
            try:
                task = self.task.get()
                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

                if task['debugger']:
                    sys.path.append('/usr/local/lib/dispatcher/pydev')
                    import pydevd
                    host, port = task['debugger']
                    pydevd.settrace(host, port=port, stdoutToServer=True, stderrToServer=True)

                name, _ = os.path.splitext(os.path.basename(task['filename']))
                module = load_module_from_file(name, task['filename'])
                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

                try:
                    self.instance = getattr(module, task['class'])(DispatcherWrapper(self.conn), self.datastore)
                    self.instance.configstore = self.configstore
                    self.instance.environment = task['environment']
                    self.running.set()
                    result = self.instance.run(*task['args'])
                except BaseException as err:
                    print("Task exception: {0}".format(str(err)), file=sys.stderr)
                    traceback.print_exc(file=sys.stderr)

                    if hasattr(self.instance, 'rollback'):
                        self.put_status('ROLLBACK')
                        try:
                            self.instance.rollback(*task['args'])
                        except BaseException as rerr:
                            print("Task exception during rollback: {0}".format(str(rerr)), file=sys.stderr)
                            traceback.print_exc(file=sys.stderr)

                    self.put_status('FAILED', exception=err)
                else:
                    self.put_status('FINISHED', result=result)
                finally:
                    self.running.clear()

            except RpcException as err:
                print("RPC failed: {0}".format(str(err)), file=sys.stderr)
                sys.exit(errno.EBADMSG)
            except socket.error as err:
                print("Cannot connect to dispatcher: {0}".format(str(err)), file=sys.stderr)
                sys.exit(errno.ETIMEDOUT)

            if task['debugger']:
                import pydevd
                pydevd.stoptrace()

            setproctitle.setproctitle('task executor (idle)')
Example 11: Main
# Required import: from freenas.dispatcher.client import Client [as alias]
# Or: from freenas.dispatcher.client.Client import enable_server [as alias]
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('dscached')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.plugins = {}

    def init_datastore(self):
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def parse_config(self, filename):
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            self.logger.error('Cannot read config file: %s', err.message)
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['dscached']['plugin-dirs']

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('dscached')
                self.client.enable_server()
                self.client.register_service('dscached.account', AccountService(self))
                self.client.register_service('dscached.group', GroupService(self))
                self.client.register_service('dscached.debug', DebugService())
                self.client.resume_service('dscached.account')
                self.client.resume_service('dscached.group')
                self.client.resume_service('dscached.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        self.plugins[name] = cls(self)
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()

        configure_logging('/var/log/dscached.log', 'DEBUG')
        setproctitle.setproctitle('dscached')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.client.wait_forever()
Example 12: Main
# Required import: from freenas.dispatcher.client import Client [as alias]
# Or: from freenas.dispatcher.client.Client import enable_server [as alias]
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('neighbord')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.config = None
        self.logger = logging.getLogger()
        self.plugin_dirs = []
        self.plugins = {}

    def parse_config(self, filename):
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            self.logger.error('Cannot read config file: %s', err.message)
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['neighbord']['plugin-dirs']

    def init_datastore(self):
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = load_module_from_file(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        self.plugins[name] = cls(self)
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_service(self, name, regtype, port, properties=None):
        for plugin in self.plugins.values():
            plugin.register(regtype, name, port, properties)

    def register(self):
        try:
            hostname = socket.gethostname()
            general = self.client.call_sync('system.general.get_config')
            properties = {
                'version': self.client.call_sync('system.info.version'),
                'description': general['description'],
                'tags': ','.join(general['tags'])
            }

            self.register_service(hostname, 'freenas', 80, properties)
            self.register_service(hostname, 'http', 80)
            self.register_service(hostname, 'ssh', 22)
            self.register_service(hostname, 'sftp-ssh', 22)
        except BaseException as err:
            self.logger.error('Failed to register services: {0}'.format(str(err)))

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('neighbord')
                self.client.enable_server()
                self.client.register_service('neighbord.management', ManagementService(self))
                self.client.register_service('neighbord.discovery', DiscoveryService(self))
                self.client.register_service('neighbord.debug', DebugService())
                self.client.resume_service('neighbord.management')
                self.client.resume_service('neighbord.discovery')
                self.client.resume_service('neighbord.debug')
                return
            except (OSError, RpcException) as err:
#.........remaining code omitted.........
Example 13: Context
# Required import: from freenas.dispatcher.client import Client [as alias]
# Or: from freenas.dispatcher.client.Client import enable_server [as alias]
class Context(object):
    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.scheduler = None
        self.active_tasks = {}

    def init_datastore(self):
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        store = FreeNASJobStore()
        self.scheduler = BackgroundScheduler(jobstores={'default': store, 'temp': MemoryJobStore()}, timezone=pytz.utc)
        self.scheduler.start()

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        tid = self.client.call_sync('task.submit_with_env', args[0], args[1:], {
            'RUN_AS_USER': 'root',
            'CALENDAR_TASK_NAME': kwargs.get('name')
        })

        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)

        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync('alert.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(
                        kwargs.get('name', tid),
                        result['error']['message']
                    ),
                })
            except RpcException as e:
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        self.client.emit_event(name, params)

    def checkin(self):
        checkin()

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        args = parser.parse_args()

        configure_logging('/var/log/schedulerd.log', 'DEBUG')
        setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.checkin()
        self.client.wait_forever()
Example 14: Context
# Required import: from freenas.dispatcher.client import Client [as alias]
# Or: from freenas.dispatcher.client.Client import enable_server [as alias]
class Context(object):
    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.msock = msock.client.Client()
        self.msock.on_closed = self.on_msock_close
        self.rpc_fd = -1
        self.connection_id = None
        self.jobs = []
        self.state = ConnectionState.OFFLINE
        self.config = None
        self.keepalive = None
        self.connected_at = None
        self.cv = Condition()
        self.rpc = RpcContext()
        self.client = Client()
        self.server = Server()
        self.middleware_endpoint = None

    def start(self, configpath, sockpath):
        signal.signal(signal.SIGUSR2, lambda signo, frame: self.connect())
        self.read_config(configpath)
        self.server.rpc = RpcContext()
        self.server.rpc.register_service_instance("control", ControlService(self))
        self.server.start(sockpath)
        threading.Thread(target=self.server.serve_forever, name="server thread", daemon=True).start()

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning("Connection to dispatcher lost")
                self.connect_dispatcher()

        self.middleware_endpoint = Client()
        self.middleware_endpoint.on_error(on_error)
        self.connect_dispatcher()

    def connect_dispatcher(self):
        while True:
            try:
                self.middleware_endpoint.connect("unix:")
                self.middleware_endpoint.login_service("debugd")
                self.middleware_endpoint.enable_server()
                self.middleware_endpoint.register_service("debugd.management", ControlService(self))
                self.middleware_endpoint.resume_service("debugd.management")
                return
            except (OSError, RpcException) as err:
                self.logger.warning("Cannot connect to dispatcher: {0}, retrying in 1 second".format(str(err)))
                time.sleep(1)

    def read_config(self, path):
        try:
            with open(path) as f:
                self.config = json.load(f)
        except (IOError, OSError, ValueError) as err:
            self.logger.fatal("Cannot open config file: {0}".format(str(err)))
            self.logger.fatal("Exiting.")
            sys.exit(1)

    def connect(self, discard=False):
        if discard:
            self.connection_id = None

        self.keepalive = threading.Thread(target=self.connect_keepalive, daemon=True)
        self.keepalive.start()

    def connect_keepalive(self):
        while True:
            try:
                if not self.connection_id:
                    self.connection_id = uuid.uuid4()

                self.msock.connect(SUPPORT_PROXY_ADDRESS)
                self.logger.info("Connecting to {0}".format(SUPPORT_PROXY_ADDRESS))
                self.rpc_fd = self.msock.create_channel(0)
                time.sleep(1)  # FIXME
                self.client = Client()
                self.client.connect("fd://", fobj=self.rpc_fd)
                self.client.channel_serializer = MSockChannelSerializer(self.msock)
                self.client.standalone_server = True
                self.client.enable_server()
                self.client.register_service("debug", DebugService(self))
                self.client.call_sync(
                    "server.login", str(self.connection_id), socket.gethostname(), get_version(), "none"
                )
                self.set_state(ConnectionState.CONNECTED)
            except BaseException as err:
                self.logger.warning("Failed to initiate support connection: {0}".format(err), exc_info=True)
                self.msock.disconnect()
            else:
                self.connected_at = datetime.now()
                with self.cv:
                    self.cv.wait_for(lambda: self.state in (ConnectionState.LOST, ConnectionState.OFFLINE))
                    if self.state == ConnectionState.OFFLINE:
                        return

                self.logger.warning("Support connection lost, retrying in 10 seconds")
                time.sleep(10)

    def disconnect(self):
        self.connected_at = None
#.........remaining code omitted.........