本文整理汇总了Python中freenas.dispatcher.client.Client.call_sync方法的典型用法代码示例。如果您正苦于以下问题:Python Client.call_sync方法的具体用法?Python Client.call_sync怎么用?Python Client.call_sync使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类freenas.dispatcher.client.Client
的用法示例。
在下文中一共展示了Client.call_sync方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: from freenas.dispatcher.client import Client [as 别名]
# 或者: from freenas.dispatcher.client.Client import call_sync [as 别名]
def main(*args):
    """Sendmail-compatible entry point: read an email message from stdin
    and submit it to the dispatcher's mail service.

    Flags mirror sendmail(8): ``-i`` (leading-dot handling, recorded but
    unused here) and ``-t`` (parse recipients from the To: header).
    Remaining positional arguments are treated as recipient addresses.
    """
    connection = Client()
    connection.connect("127.0.0.1")
    connection.login_service("smtp")

    parser = argparse.ArgumentParser(description="Process email")
    parser.add_argument("-i", dest="strip_leading_dot", action="store_false", default=True, help="see sendmail(8) -i")
    parser.add_argument(
        "-t", dest="parse_recipients", action="store_true", default=False, help="parse recipients from message"
    )
    parser.usage = " ".join(parser.format_usage().split(" ")[1:-1])
    parser.usage += " [email_addr|user] .."
    args, to_addrs = parser.parse_known_args()

    if not to_addrs and not args.parse_recipients:
        parser.exit(message=parser.format_usage())

    msg = sys.stdin.read()
    em_parser = email.parser.Parser()
    em = em_parser.parsestr(msg)

    if args.parse_recipients:
        # Strip away the comma based delimiters and whitespace.
        # BUG FIX: the original used map(), whose result on Python 3 is a
        # lazy iterator — always truthy and not subscriptable, so the
        # checks below could never work. Materialize it as a list.
        to_addrs = [addr.strip() for addr in em.get("To").split(",")]
        if not to_addrs or not to_addrs[0]:
            to_addrs = ["root"]

    margs = {}
    margs["extra_headers"] = dict(em)
    margs["extra_headers"].update({"X-Mailer": "FreeNAS", "X-FreeNAS-Host": socket.gethostname()})
    margs["subject"] = em.get("Subject")

    if em.is_multipart():
        # BUG FIX: materialize the filter() so the attachment list is a
        # real list rather than a lazy iterator when sent over RPC.
        margs["attachments"] = [part for part in em.walk() if part.get_content_maintype() != "multipart"]
        margs["message"] = (
            "This is a MIME formatted message. If you see "
            "this text it means that your email software "
            "does not support MIME formatted messages."
        )
    else:
        margs["message"] = "".join(email.iterators.body_line_iterator(em))

    if to_addrs:
        margs["to"] = to_addrs

    connection.call_sync("mail.send", margs)
    connection.disconnect()
示例2: SyslogProvider
# 需要导入模块: from freenas.dispatcher.client import Client [as 别名]
# 或者: from freenas.dispatcher.client.Client import call_sync [as 别名]
class SyslogProvider(Provider):
    """RPC provider exposing log records collected by the logd daemon."""

    def initialize(self, context):
        # Keep a dedicated connection to logd's unix socket for queries.
        conn = Client()
        conn.connect('unix:///var/run/logd.sock')
        self.client = conn

    @generator
    def query(self, filter=None, params=None):
        """Stream log entries matching *filter*/*params* from logd."""
        return self.client.call_sync('logd.logging.query', filter, params)
示例3: test_back_to_back
# 需要导入模块: from freenas.dispatcher.client import Client [as 别名]
# 或者: from freenas.dispatcher.client.Client import call_sync [as 别名]
def test_back_to_back(self):
    """Two Client instances over a socketpair: one side acts as a
    standalone server, the other calls it."""
    left, right = socket.socketpair()
    self.assertGreaterEqual(left.fileno(), 0)
    self.assertGreaterEqual(right.fileno(), 0)

    serving = Client()
    serving.standalone_server = True
    serving.enable_server()
    serving.register_service('test', TestService())
    serving.connect('fd://{0}'.format(left.fileno()))
    self.assertTrue(serving.connected)

    calling = Client()
    calling.connect('fd://{0}'.format(right.fileno()))
    self.assertTrue(calling.connected)
    self.assertEqual(calling.call_sync('test.hello', 'freenas'), 'Hello World, freenas')

    # Tear down in the same order as the original: caller first.
    calling.disconnect()
    left.close()
    serving.disconnect()
    right.close()
示例4: test_unix_server
# 需要导入模块: from freenas.dispatcher.client import Client [as 别名]
# 或者: from freenas.dispatcher.client.Client import call_sync [as 别名]
def test_unix_server(self):
    """Spin up a Server on a unix socket, connect a Client, make a call."""
    sockpath = os.path.join(os.getcwd(), 'test.{0}.sock'.format(os.getpid()))
    sockurl = 'unix://' + sockpath

    rpc = RpcContext()
    rpc.register_service('test', TestService)

    server = Server()
    server.rpc = rpc
    server.start(sockurl)
    threading.Thread(target=server.serve_forever, daemon=True).start()

    # Busy-wait until the server has created its socket file.
    while not os.path.exists(sockpath):
        time.sleep(0.1)

    client = Client()
    client.connect(sockurl)
    self.assertTrue(client.connected)
    self.assertEqual(client.call_sync('test.hello', 'freenas'), 'Hello World, freenas')

    client.disconnect()
    server.close()
    os.unlink(sockpath)
示例5: Context
# 需要导入模块: from freenas.dispatcher.client import Client [as 别名]
# 或者: from freenas.dispatcher.client.Client import call_sync [as 别名]
class Context(object):
def __init__(self):
    """schedulerd context: holds all daemon-wide runtime state."""
    self.logger = logging.getLogger('schedulerd')
    # All of these are wired up later by the init_* methods.
    self.config = self.datastore = self.configstore = None
    self.client = self.scheduler = None
    # Maps calendar job id -> currently running task id.
    self.active_tasks = {}
def init_datastore(self):
    """Open the datastore; an unreachable datastore is fatal."""
    try:
        store = get_datastore(self.config)
    except DatastoreException as err:
        self.logger.error('Cannot initialize datastore: %s', str(err))
        sys.exit(1)
    self.datastore = store
    self.configstore = ConfigStore(store)
def init_dispatcher(self):
    """Create the dispatcher client and establish the first connection."""
    def on_error(reason, **kwargs):
        # Re-establish the connection whenever the dispatcher drops us.
        if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
            self.logger.warning('Connection to dispatcher lost')
            self.connect()

    client = Client()
    client.on_error(on_error)
    self.client = client
    self.connect()
def init_scheduler(self):
    """Start APScheduler backed by the calendar_tasks Mongo collection."""
    jobstore = MongoDBJobStore(database='freenas', collection='calendar_tasks', client=self.datastore.client)
    scheduler = BackgroundScheduler(jobstores={'default': jobstore}, timezone=pytz.utc)
    scheduler.start()
    self.scheduler = scheduler
def register_schemas(self):
    """Register the calendar-task JSON schemas with the dispatcher."""
    self.client.register_schema('calendar-task', {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'id': {'type': 'string'},
            'name': {'type': 'string'},
            'args': {'type': 'array'},
            'description': {'type': 'string'},
            'enabled': {'type': 'boolean'},
            'hidden': {'type': 'boolean'},
            'protected': {'type': 'boolean'},
            'status': {'$ref': 'calendar-task-status'},
            # Cron-like schedule; each field accepts APScheduler-style
            # string expressions, plain integers, or null (wildcard).
            'schedule': {
                'type': 'object',
                'additionalProperties': False,
                'properties': {
                    'coalesce': {'type': ['boolean', 'integer', 'null']},
                    'year': {'type': ['string', 'integer', 'null']},
                    'month': {'type': ['string', 'integer', 'null']},
                    'day': {'type': ['string', 'integer', 'null']},
                    'week': {'type': ['string', 'integer', 'null']},
                    'day_of_week': {'type': ['string', 'integer', 'null']},
                    'hour': {'type': ['string', 'integer', 'null']},
                    'minute': {'type': ['string', 'integer', 'null']},
                    'second': {'type': ['string', 'integer', 'null']},
                    'timezone': {'type': ['string', 'null']}
                }
            }
        }
    })

    self.client.register_schema('calendar-task-status', {
        'type': 'object',
        'properties': {
            'next_run_time': {'type': 'string'},
            'last_run_status': {'type': 'string'},
            'current_run_status': {'type': ['string', 'null']},
            'current_run_progress': {'type': ['object', 'null']}
        }
    })
def connect(self):
    """Connect to the dispatcher and register scheduler RPC services,
    retrying every second until the connection succeeds."""
    while True:
        try:
            self.client.connect('unix:')
            self.client.login_service('schedulerd')
            self.client.enable_server()
            self.client.register_service('scheduler.management', ManagementService(self))
            self.client.register_service('scheduler.debug', DebugService())
            self.client.resume_service('scheduler.management')
            self.client.resume_service('scheduler.debug')
            return
        except (OSError, RpcException) as err:
            self.client.connect('unix:')  # noqa — see note below
            self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
            time.sleep(1)
def run_job(self, *args, **kwargs):
tid = self.client.submit_task(*args)
self.active_tasks[kwargs['id']] = tid
self.client.call_sync('task.wait', tid, timeout=None)
result = self.client.call_sync('task.status', tid)
if result['state'] != 'FINISHED':
try:
self.client.call_sync('alerts.emit', {
'name': 'scheduler.task.failed',
#.........这里部分代码省略.........
示例6: Main
# 需要导入模块: from freenas.dispatcher.client import Client [as 别名]
# 或者: from freenas.dispatcher.client.Client import call_sync [as 别名]
class Main(object):
def __init__(self):
    """containerd daemon state container."""
    # Wired up later by init_* methods.
    self.client = self.datastore = self.configstore = None
    self.config = self.mgmt = self.bridge_interface = None
    self.vm_started = Event()
    self.containers = {}
    self.tokens = {}
    self.logger = logging.getLogger('containerd')
    # Indices of nmdm devices currently handed out.
    self.used_nmdms = []
def init_datastore(self):
    """Open the datastore; failure terminates the daemon."""
    try:
        store = get_datastore(self.config)
    except DatastoreException as err:
        self.logger.error('Cannot initialize datastore: %s', str(err))
        sys.exit(1)
    self.datastore = store
    self.configstore = ConfigStore(store)
def allocate_nmdm(self):
    """Reserve and return the lowest free nmdm device index (0-254)."""
    for index in range(255):
        if index in self.used_nmdms:
            continue
        self.used_nmdms.append(index)
        return index
def release_nmdm(self, index):
    # Return a previously allocated nmdm index to the free pool.
    self.used_nmdms.remove(index)
def connect(self):
    """Connect to the dispatcher and register containerd RPC services,
    retrying every second until the connection succeeds."""
    while True:
        try:
            self.client.connect('unix:')
            self.client.login_service('containerd')
            self.client.enable_server()
            self.client.register_service('containerd.management', ManagementService(self))
            self.client.register_service('containerd.debug', DebugService(gevent=True, builtins={"context": self}))
            self.client.resume_service('containerd.management')
            self.client.resume_service('containerd.debug')
            return
        except (OSError, RpcException) as err:
            self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
            time.sleep(1)
def init_dispatcher(self):
    """Create the dispatcher client (burst mode enabled) and connect."""
    def on_error(reason, **kwargs):
        # Reconnect on dropped or logged-out connections.
        if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
            self.logger.warning('Connection to dispatcher lost')
            self.connect()

    client = Client()
    client.use_bursts = True
    client.on_error(on_error)
    self.client = client
    self.connect()
def init_mgmt(self):
    """Bring up the container management network and its bridge."""
    self.mgmt = ManagementNetwork(self, MGMT_INTERFACE, MGMT_ADDR)
    self.mgmt.up()
    # NOTE(review): 169.254.169.254 is presumably a metadata-service
    # style endpoint for guests — confirm against the consumers.
    self.mgmt.bridge_if.add_address(netif.InterfaceAddress(
        netif.AddressFamily.INET,
        ipaddress.ip_interface('169.254.169.254/32')
    ))
def init_nat(self):
    """Install a pf NAT rule translating management-network traffic out
    of the default route interface; no-op if there is no default route."""
    default_if = self.client.call_sync('networkd.configuration.get_default_interface')
    if not default_if:
        self.logger.warning('No default route interface; not configuring NAT')
        return

    p = pf.PF()

    # Try to find and remove existing NAT rules for the same subnet
    oldrule = first_or_default(
        lambda r: r.src.address.address == MGMT_ADDR.network.network_address,
        p.get_rules('nat')
    )

    if oldrule:
        p.delete_rule('nat', oldrule.index)

    rule = pf.Rule()
    rule.src.address.address = MGMT_ADDR.network.network_address
    rule.src.address.netmask = MGMT_ADDR.netmask
    rule.action = pf.RuleAction.NAT
    rule.af = socket.AF_INET
    rule.ifname = default_if
    rule.redirect_pool.append(pf.Address(ifname=default_if))
    rule.proxy_ports = [50001, 65535]
    p.append_rule('nat', rule)

    try:
        p.enable()
    except OSError as err:
        # pf being already enabled (EEXIST) is fine; anything else is fatal.
        if err.errno != errno.EEXIST:
            raise err
#.........这里部分代码省略.........
示例7: run
# 需要导入模块: from freenas.dispatcher.client import Client [as 别名]
# 或者: from freenas.dispatcher.client.Client import call_sync [as 别名]
def run(self, peer, initial_credentials):
hostid = self.dispatcher.call_sync('system.info.host_uuid')
hostname = self.dispatcher.call_sync('system.general.get_config')['hostname']
remote_peer_name = hostname
credentials = peer['credentials']
remote = credentials.get('address')
port = credentials.get('port', 22)
username = initial_credentials.get('username')
password = initial_credentials.get('password')
auth_code = initial_credentials.get('auth_code')
key_auth = initial_credentials.get('key_auth')
local_ssh_config = self.dispatcher.call_sync('service.sshd.get_config')
if self.datastore.exists('peers', ('credentials.address', '=', remote), ('type', '=', 'freenas')):
raise TaskException(
errno.EEXIST,
'FreeNAS peer entry for {0} already exists'.format(remote)
)
remote_client = Client()
try:
if auth_code:
try:
remote_client.connect('ws://{0}'.format(wrap_address(remote)))
except (AuthenticationException, OSError, ConnectionRefusedError):
raise TaskException(errno.ECONNABORTED, 'Cannot connect to {0}:{1}'.format(remote, port))
try:
remote_host_uuid, pubkey = remote_client.call_sync(
'peer.freenas.auth_with_code',
auth_code,
hostname,
local_ssh_config['port']
)
except RpcException as err:
raise TaskException(err.code, err.message)
try:
self.dispatcher.call_sync('peer.freenas.put_temp_pubkey', pubkey)
if not self.dispatcher.test_or_wait_for_event(
'peer.changed',
lambda ar: ar['operation'] == 'create' and remote_host_uuid in ar['ids'],
lambda: self.datastore.exists('peers', ('id', '=', remote_host_uuid)),
timeout=30
):
raise TaskException(
errno.EAUTH,
'FreeNAS peer creation failed. Check connection to host {0}.'.format(remote)
)
finally:
self.dispatcher.call_sync('peer.freenas.remove_temp_pubkey', pubkey)
else:
try:
if key_auth:
with io.StringIO() as f:
f.write(self.configstore.get('peer.freenas.key.private'))
f.seek(0)
pkey = RSAKey.from_private_key(f)
max_tries = 50
while True:
try:
remote_client.connect('ws+ssh://[email protected]{0}'.format(
wrap_address(remote)), pkey=pkey, port=port
)
break
except AuthenticationException:
if max_tries:
max_tries -= 1
time.sleep(1)
else:
raise
else:
remote_client.connect(
'ws+ssh://{0}@{1}'.format(username, wrap_address(remote)),
port=port,
password=password
)
remote_client.login_service('replicator')
except (AuthenticationException, OSError, ConnectionRefusedError):
raise TaskException(errno.ECONNABORTED, 'Cannot connect to {0}:{1}'.format(remote, port))
local_host_key, local_pub_key = self.dispatcher.call_sync('peer.freenas.get_ssh_keys')
remote_host_key, remote_pub_key = remote_client.call_sync('peer.freenas.get_ssh_keys')
ip_at_remote_side = remote_client.local_address[0]
remote_hostname = remote_client.call_sync('system.general.get_config')['hostname']
remote_host_key = remote_host_key.rsplit(' ', 1)[0]
local_host_key = local_host_key.rsplit(' ', 1)[0]
if remote_client.call_sync('peer.query', [('id', '=', hostid)]):
raise TaskException(errno.EEXIST, 'Peer entry of {0} already exists at {1}'.format(hostname, remote))
peer['credentials'] = {
'%type': 'freenas-credentials',
#.........这里部分代码省略.........
示例8: run
# 需要导入模块: from freenas.dispatcher.client import Client [as 别名]
# 或者: from freenas.dispatcher.client.Client import call_sync [as 别名]
def run(self, peer):
    """Pair this host with a remote replication peer over SSH.

    Validates credentials, exchanges SSH key material with the remote
    side, creates a local peer entry and then a mirror entry on the
    remote host; the local entry is rolled back if the remote creation
    fails.
    """
    if self.datastore.exists('peers', ('address', '=', peer['address']), ('type', '=', 'replication')):
        raise TaskException(errno.EEXIST, 'Replication peer entry for {0} already exists'.format(peer['address']))

    if peer['credentials']['type'] != 'ssh':
        raise TaskException(errno.EINVAL, 'SSH credentials type is needed to perform replication peer pairing')

    remote = peer.get('address')
    credentials = peer['credentials']
    username = credentials.get('username')
    port = credentials.get('port', 22)
    password = credentials.get('password')

    if not username:
        raise TaskException(errno.EINVAL, 'Username has to be specified')
    if not remote:
        raise TaskException(errno.EINVAL, 'Address of remote host has to be specified')
    if not password:
        raise TaskException(errno.EINVAL, 'Password has to be specified')

    remote_client = Client()
    try:
        try:
            remote_client.connect('ws+ssh://{0}@{1}'.format(username, remote), port=port, password=password)
            remote_client.login_service('replicator')
        except (AuthenticationException, OSError, ConnectionRefusedError):
            raise TaskException(errno.ECONNABORTED, 'Cannot connect to {0}:{1}'.format(remote, port))

        # Exchange SSH key material; each side records the other's host key.
        local_keys = self.dispatcher.call_sync('peer.get_ssh_keys')
        remote_keys = remote_client.call_sync('peer.get_ssh_keys')
        ip_at_remote_side = remote_client.call_sync('management.get_sender_address').split(',', 1)[0]

        remote_host_key = remote + ' ' + remote_keys[0].rsplit(' ', 1)[0]
        local_host_key = ip_at_remote_side + ' ' + local_keys[0].rsplit(' ', 1)[0]
        local_ssh_config = self.dispatcher.call_sync('service.sshd.get_config')

        if remote_client.call_sync('peer.query', [('name', '=', peer['name'])]):
            raise TaskException(errno.EEXIST, 'Peer entry {0} already exists at {1}'.format(peer['name'], remote))

        peer['credentials'] = {
            'pubkey': remote_keys[1],
            'hostkey': remote_host_key,
            'port': port,
            'type': 'replication'
        }
        self.join_subtasks(self.run_subtask(
            'peer.replication.create_local',
            peer
        ))

        # Re-point the record at the address the remote host sees us as.
        peer['address'] = ip_at_remote_side
        peer['credentials'] = {
            'pubkey': local_keys[1],
            'hostkey': local_host_key,
            'port': local_ssh_config['port'],
            'type': 'replication'
        }

        # NOTE(review): shadows the builtin ``id``; also ``select='id'``
        # may return a list — verify datastore.delete below accepts it.
        id = self.datastore.query('peers', ('name', '=', peer['name']), select='id')
        try:
            call_task_and_check_state(
                remote_client,
                'peer.replication.create_local',
                peer
            )
        except TaskException:
            # Remote creation failed — roll back the local peer entry.
            self.datastore.delete('peers', id)
            self.dispatcher.dispatch_event('peer.changed', {
                'operation': 'delete',
                'ids': [id]
            })
            raise
    finally:
        remote_client.disconnect()
示例9: Context
# 需要导入模块: from freenas.dispatcher.client import Client [as 别名]
# 或者: from freenas.dispatcher.client.Client import call_sync [as 别名]
class Context(object):
def __init__(self):
    """Task-executor context shared by the proxy service and main loop."""
    self.service = TaskProxyService(self)
    # Single-slot queue: exactly one task may be pending at a time.
    self.task = queue.Queue(1)
    self.datastore = self.configstore = None
    self.conn = self.instance = None
    self.running = Event()
def put_status(self, state, result=None, exception=None):
    """Report the task's *state* (plus optional result/error) upstream."""
    payload = {'status': state, 'result': None}
    if result is not None:
        payload['result'] = result
    if exception is not None:
        payload['error'] = serialize_error(exception)
    self.conn.call_sync('task.put_status', payload)
def task_progress_handler(self, args):
    # Forward progress events to the running task instance, if any.
    if self.instance:
        self.instance.task_progress_handler(args)
def collect_fds(self, obj):
    """Recursively yield every FileDescriptor nested inside *obj*.

    Walks dict values and list/tuple elements; other types yield nothing.
    """
    if isinstance(obj, dict):
        children = obj.values()
    elif isinstance(obj, (list, tuple)):
        children = obj
    else:
        return

    for child in children:
        if isinstance(child, FileDescriptor):
            yield child
        else:
            yield from self.collect_fds(child)
def close_fds(self, fds):
    """Best-effort close of every descriptor object in *fds*."""
    for descriptor in fds:
        try:
            os.close(descriptor.fd)
        except OSError:
            # Already closed elsewhere — ignore.
            pass
def main(self):
if len(sys.argv) != 2:
print("Invalid number of arguments", file=sys.stderr)
sys.exit(errno.EINVAL)
key = sys.argv[1]
configure_logging(None, logging.DEBUG)
self.datastore = get_datastore()
self.configstore = ConfigStore(self.datastore)
self.conn = Client()
self.conn.connect('unix:')
self.conn.login_service('task.{0}'.format(os.getpid()))
self.conn.enable_server()
self.conn.rpc.register_service_instance('taskproxy', self.service)
self.conn.register_event_handler('task.progress', self.task_progress_handler)
self.conn.call_sync('task.checkin', key)
setproctitle.setproctitle('task executor (idle)')
while True:
try:
task = self.task.get()
logging.root.setLevel(self.conn.call_sync('management.get_logging_level'))
setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))
if task['debugger']:
sys.path.append('/usr/local/lib/dispatcher/pydev')
import pydevd
host, port = task['debugger']
pydevd.settrace(host, port=port, stdoutToServer=True, stderrToServer=True)
name, _ = os.path.splitext(os.path.basename(task['filename']))
module = load_module_from_file(name, task['filename'])
setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))
fds = list(self.collect_fds(task['args']))
try:
self.instance = getattr(module, task['class'])(DispatcherWrapper(self.conn), self.datastore)
self.instance.configstore = self.configstore
self.instance.user = task['user']
self.instance.environment = task['environment']
self.running.set()
result = self.instance.run(*task['args'])
except BaseException as err:
print("Task exception: {0}".format(str(err)), file=sys.stderr)
traceback.print_exc(file=sys.stderr)
if hasattr(self.instance, 'rollback'):
#.........这里部分代码省略.........
示例10: BaseTestCase
# 需要导入模块: from freenas.dispatcher.client import Client [as 别名]
# 或者: from freenas.dispatcher.client.Client import call_sync [as 别名]
class BaseTestCase(unittest.TestCase):
class TaskState(object):
    """Mutable record tracking one submitted task's lifecycle."""

    def __init__(self):
        # All fields are filled in as task.updated events arrive.
        self.tid = self.state = None
        self.message = self.result = None
        self.name = None
        self.ended = Event()
def __init__(self, methodName):
    """Per-test bookkeeping on top of unittest.TestCase."""
    super(BaseTestCase, self).__init__(methodName)
    self.tasks = {}
    self.tasks_lock = Lock()
    self.conn = None
    # Seconds to wait for task completion / login.
    self.task_timeout = 30
def setUp(self):
    """Connect and authenticate against the test host before each test.

    Host and credentials come from TESTHOST/TESTUSER/TESTPWD env vars.
    """
    # The original wrapped this in ``try: ... except: raise`` — a bare
    # except that only re-raised, adding nothing; it has been removed.
    self.conn = Client()
    self.conn.event_callback = self.on_event
    self.conn.connect(os.getenv('TESTHOST', '127.0.0.1'))
    self.conn.login_user(os.getenv('TESTUSER', 'root'), os.getenv('TESTPWD', ''), timeout=self.task_timeout)
    self.conn.subscribe_events('*')
def tearDown(self):
    # Drop the middleware connection after each test.
    self.conn.disconnect()
def submitTask(self, name, *args):
    """Submit task *name* with *args*, start tracking it and return its id."""
    with self.tasks_lock:
        # The original guarded the call with ``except RpcException: raise``
        # and ``except Exception: raise`` — both no-op re-raises, removed.
        tid = self.conn.call_sync('task.submit', name, args)
        state = self.TaskState()
        state.tid = tid
        state.name = name
        self.tasks[tid] = state
    return tid
def assertTaskCompletion(self, tid):
    """Wait for task *tid* to end and assert it reached FINISHED state."""
    t = self.tasks[tid]
    if not t.ended.wait(self.task_timeout):
        self.fail('Task {0} timed out'.format(tid))
    #print dir(t)
    #print 'Message is ' + str(t.message)
    #print 'State is ' + str(t.state)
    #print 'Result is ' + str(t.result)
    # NOTE(review): ``t.error`` is never assigned in the visible code —
    # confirm TaskState gains it elsewhere before trusting this branch.
    if t.state.count('Executing...'):
        message = t.error
    elif t.__getattribute__('message') and t.message.count('Executing...'):
        message = t.state
    else:
        message = t.message
    if not message:
        self.query_task(tid)
    self.assertEqual(t.state, 'FINISHED', msg=message)
def assertTaskFailure(self, tid):
    """Wait for task *tid* to end and assert it did NOT finish cleanly."""
    record = self.tasks[tid]
    if not record.ended.wait(self.task_timeout):
        self.fail('Task {0} timed out'.format(tid))
    self.assertNotEqual(record.state, 'FINISHED', msg=record.message)
def assertSeenEvent(self, name, func=None):
    # TODO: not implemented — currently accepts any event unconditionally.
    pass
def skip(self, reason):
    """Skip the current test with *reason*."""
    raise unittest.SkipTest(str(reason))
def getTaskResult(self, tid):
    """Return the recorded result of task *tid*."""
    return self.tasks[tid].result
def on_event(self, name, args):
with self.tasks_lock:
if name == 'task.updated':
#DEBUG
#print 'ARGS IS ' + str(args)
#print 'TASK LIST IS ' + str(self.tasks)
#for pc in self.conn.pending_calls.keys():
# print 'PENDING CALL METHOD ' + str(self.conn.pending_calls[pc].method) + \
# ' and ID ' + str(self.conn.pending_calls[pc].id)
if args['id'] not in self.tasks.keys():
if args['state'] == 'EXECUTING':
return
else:
t = self.tasks[args['id']]
t.state = args['state']
if t.state in ('FINISHED', 'FAILED'):
#.........这里部分代码省略.........
示例11: Context
# 需要导入模块: from freenas.dispatcher.client import Client [as 别名]
# 或者: from freenas.dispatcher.client.Client import call_sync [as 别名]
class Context(object):
    """Out-of-process task executor: checks in with the dispatcher, then
    runs one task at a time, reporting status (and rollback) upstream."""

    def __init__(self):
        self.service = TaskProxyService(self)
        # Single-slot queue: exactly one pending task at a time.
        self.task = queue.Queue(1)
        self.datastore = None
        self.configstore = None
        self.conn = None
        self.instance = None
        self.running = Event()

    def put_status(self, state, result=None, exception=None):
        """Send a task status update (optional result/error) upstream."""
        obj = {
            'status': state,
            'result': None
        }

        if result is not None:
            obj['result'] = result

        if exception is not None:
            obj['error'] = serialize_error(exception)

        self.conn.call_sync('task.put_status', obj)

    def main(self):
        """Entry point: expects the check-in key as the sole argv argument,
        connects to the dispatcher and loops executing queued tasks."""
        if len(sys.argv) != 2:
            print("Invalid number of arguments", file=sys.stderr)
            sys.exit(errno.EINVAL)

        key = sys.argv[1]
        logging.basicConfig(level=logging.DEBUG)
        self.datastore = get_datastore()
        self.configstore = ConfigStore(self.datastore)
        self.conn = Client()
        self.conn.connect('unix:')
        self.conn.login_service('task.{0}'.format(os.getpid()))
        self.conn.enable_server()
        self.conn.rpc.register_service_instance('taskproxy', self.service)
        self.conn.call_sync('task.checkin', key)
        setproctitle.setproctitle('task executor (idle)')

        while True:
            try:
                task = self.task.get()
                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

                if task['debugger']:
                    # Remote-debugging support via pydevd.
                    sys.path.append('/usr/local/lib/dispatcher/pydev')
                    import pydevd
                    host, port = task['debugger']
                    pydevd.settrace(host, port=port, stdoutToServer=True, stderrToServer=True)

                name, _ = os.path.splitext(os.path.basename(task['filename']))
                module = load_module_from_file(name, task['filename'])
                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

                try:
                    self.instance = getattr(module, task['class'])(DispatcherWrapper(self.conn), self.datastore)
                    self.instance.configstore = self.configstore
                    self.instance.environment = task['environment']
                    self.running.set()
                    result = self.instance.run(*task['args'])
                except BaseException as err:
                    print("Task exception: {0}".format(str(err)), file=sys.stderr)
                    traceback.print_exc(file=sys.stderr)

                    # Attempt rollback before reporting failure.
                    if hasattr(self.instance, 'rollback'):
                        self.put_status('ROLLBACK')
                        try:
                            self.instance.rollback(*task['args'])
                        except BaseException as rerr:
                            print("Task exception during rollback: {0}".format(str(rerr)), file=sys.stderr)
                            traceback.print_exc(file=sys.stderr)

                    self.put_status('FAILED', exception=err)
                else:
                    self.put_status('FINISHED', result=result)
                finally:
                    self.running.clear()
            except RpcException as err:
                print("RPC failed: {0}".format(str(err)), file=sys.stderr)
                sys.exit(errno.EBADMSG)
            except socket.error as err:
                print("Cannot connect to dispatcher: {0}".format(str(err)), file=sys.stderr)
                sys.exit(errno.ETIMEDOUT)

            if task['debugger']:
                import pydevd
                pydevd.stoptrace()

            setproctitle.setproctitle('task executor (idle)')
示例12: Main
# 需要导入模块: from freenas.dispatcher.client import Client [as 别名]
# 或者: from freenas.dispatcher.client.Client import call_sync [as 别名]
class Main(object):
def __init__(self):
    """Initialize neighbord daemon state."""
    # The original assigned ``self.config`` and ``self.logger`` twice;
    # only the last assignment of each was effective (the effective
    # logger is the ROOT logger, not 'neighbord'), so the redundant
    # earlier assignments were dropped.
    self.logger = logging.getLogger()
    self.config = None
    self.datastore = None
    self.configstore = None
    self.client = None
    self.plugin_dirs = []
    self.plugins = {}
def parse_config(self, filename):
    """Load the JSON middleware config and remember the plugin dirs.

    Exits the daemon on an unreadable or malformed config file.
    """
    try:
        with open(filename, 'r') as f:
            self.config = json.load(f)
    except IOError as err:
        # BUG FIX: IOError has no ``.message`` attribute on Python 3, so
        # the original error path itself raised AttributeError; format
        # the exception directly instead.
        self.logger.error('Cannot read config file: %s', str(err))
        sys.exit(1)
    except ValueError:
        self.logger.error('Config file has unreadable format (not valid JSON)')
        sys.exit(1)

    self.plugin_dirs = self.config['neighbord']['plugin-dirs']
def init_datastore(self):
    """Open the datastore; a failure is fatal for the daemon."""
    try:
        ds = datastore.get_datastore()
    except datastore.DatastoreException as err:
        self.logger.error('Cannot initialize datastore: %s', str(err))
        sys.exit(1)
    self.datastore = ds
    self.configstore = ConfigStore(ds)
def init_dispatcher(self):
    """Create the dispatcher client and establish the first connection."""
    def on_error(reason, **kwargs):
        # Reconnect whenever the dispatcher drops or logs us out.
        if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
            self.logger.warning('Connection to dispatcher lost')
            self.connect()

    client = Client()
    client.on_error(on_error)
    self.client = client
    self.connect()
def scan_plugins(self):
    """Scan every configured plugin directory for plugin modules."""
    for directory in self.plugin_dirs:
        self.scan_plugin_dir(directory)
def scan_plugin_dir(self, dir):
    """Load and initialize every ``*.py`` plugin module found in *dir*.

    A plugin that fails to initialize is logged and skipped.
    """
    self.logger.debug('Scanning plugin directory %s', dir)

    for f in os.listdir(dir):
        name, ext = os.path.splitext(os.path.basename(f))
        if ext != '.py':
            continue

        try:
            plugin = load_module_from_file(name, os.path.join(dir, f))
            plugin._init(self)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed during plugin init.
            self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)
def register_plugin(self, name, cls):
    # Instantiate the plugin class, passing this daemon as its context.
    self.plugins[name] = cls(self)
    self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))
def register_service(self, name, regtype, port, properties=None):
    """Announce a service through every registered discovery plugin."""
    for backend in self.plugins.values():
        backend.register(regtype, name, port, properties)
def register(self):
    """Advertise this host's services (freenas, http, ssh, sftp-ssh)
    through all discovery plugins; failures are logged, not fatal."""
    try:
        hostname = socket.gethostname()
        general = self.client.call_sync('system.general.get_config')
        properties = {
            'version': self.client.call_sync('system.info.version'),
            'description': general['description'],
            'tags': ','.join(general['tags'])
        }

        self.register_service(hostname, 'freenas', 80, properties)
        self.register_service(hostname, 'http', 80)
        self.register_service(hostname, 'ssh', 22)
        self.register_service(hostname, 'sftp-ssh', 22)
    except BaseException as err:
        self.logger.error('Failed to register services: {0}'.format(str(err)))
def connect(self):
while True:
try:
self.client.connect('unix:')
self.client.login_service('neighbord')
self.client.enable_server()
self.client.register_service('neighbord.management', ManagementService(self))
self.client.register_service('neighbord.discovery', DiscoveryService(self))
self.client.register_service('neighbord.debug', DebugService())
self.client.resume_service('neighbord.management')
self.client.resume_service('neighbord.discovery')
self.client.resume_service('neighbord.debug')
return
except (OSError, RpcException) as err:
#.........这里部分代码省略.........
示例13: Context
# 需要导入模块: from freenas.dispatcher.client import Client [as 别名]
# 或者: from freenas.dispatcher.client.Client import call_sync [as 别名]
class Context(object):
def __init__(self):
    """CLI context: middleware connection, plugin registry and event plumbing."""
    self.hostname = None
    self.connection = Client()
    self.ml = None
    self.logger = logging.getLogger('cli')
    self.plugin_dirs = []
    self.task_callbacks = {}
    self.plugins = {}
    self.variables = VariableStore()
    self.root_ns = RootNamespace('')
    # Subscribe to all events by default.
    self.event_masks = ['*']
    self.event_divert = False
    self.event_queue = six.moves.queue.Queue()
    self.keepalive_timer = None
    self.argparse_parser = None
    # Publish this context as the process-wide singleton.
    config.instance = self
@property
def is_interactive(self):
    """True when stdout is attached to a terminal."""
    return os.isatty(sys.stdout.fileno())
def start(self):
    """Discover plugins, then connect to the configured host."""
    self.discover_plugins()
    self.connect()
def connect(self):
    """Open the middleware connection; on failure print usage and exit."""
    try:
        self.connection.connect(self.hostname)
    except socket_error as err:
        output_msg(_(
            "Could not connect to host: {0} due to error: {1}".format(self.hostname, err)
        ))
        self.argparse_parser.print_help()
        sys.exit(1)
def login(self, user, password):
    """Authenticate, wire up event/error handlers, then let plugins log in."""
    try:
        self.connection.login_user(user, password)
        self.connection.subscribe_events(*EVENT_MASKS)
        self.connection.on_event(self.handle_event)
        self.connection.on_error(self.connection_error)
    except RpcException as e:
        if e.code == errno.EACCES:
            self.connection.disconnect()
            output_msg(_("Wrong username or password"))
            sys.exit(1)
        # NOTE(review): any non-EACCES RpcException is silently swallowed
        # and login_plugins() still runs — confirm this is intentional.

    self.login_plugins()
def keepalive(self):
    # Ping the middleware only while the connection is open.
    if self.connection.opened:
        self.connection.call_sync('management.ping')
def read_middleware_config_file(self, file):
    """
    If there is a cli['plugin-dirs'] in middleware.conf use that,
    otherwise use the default plugins dir within cli namespace
    """
    # The original tracked a ``plug_dirs`` variable that was never
    # reassigned before its ``is None`` check — dead logic, removed.
    if file:
        with open(file, 'r') as f:
            data = json.load(f)

        if 'cli' in data and 'plugin-dirs' in data['cli']:
            # A malformed (non-list) setting aborts entirely, matching
            # the original behavior (the default dir is NOT appended).
            if not isinstance(data['cli']['plugin-dirs'], list):
                return
            self.plugin_dirs += data['cli']['plugin-dirs']

    # Always append the built-in plugins directory last.
    default_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'plugins')
    self.plugin_dirs += [default_dir]
def discover_plugins(self):
    """Load CLI plugins from every configured plugin directory."""
    for dir in self.plugin_dirs:
        self.logger.debug(_("Searching for plugins in %s"), dir)
        self.__discover_plugin_dir(dir)
def login_plugins(self):
    """Give every loaded plugin a chance to react to a successful login."""
    for plugin in list(self.plugins.values()):
        if hasattr(plugin, '_login'):
            plugin._login(self)
def __discover_plugin_dir(self, dir):
    # Try loading every top-level *.py file in dir as a plugin.
    for i in glob.glob1(dir, "*.py"):
        self.__try_load_plugin(os.path.join(dir, i))
def __try_load_plugin(self, path):
if path in self.plugins:
return
self.logger.debug(_("Loading plugin from %s"), path)
name, ext = os.path.splitext(os.path.basename(path))
plugin = imp.load_source(name, path)
#.........这里部分代码省略.........
示例14: Context
# 需要导入模块: from freenas.dispatcher.client import Client [as 别名]
# 或者: from freenas.dispatcher.client.Client import call_sync [as 别名]
class Context(object):
    """schedulerd daemon context: datastore, dispatcher client and the
    APScheduler instance that fires calendar tasks."""

    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.scheduler = None
        # Maps calendar job id -> currently running task id.
        self.active_tasks = {}

    def init_datastore(self):
        """Open the datastore; exit the daemon if it is unreachable."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect (with auto-reconnect)."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        """Start APScheduler with a persistent and an in-memory job store."""
        store = FreeNASJobStore()
        self.scheduler = BackgroundScheduler(jobstores={'default': store, 'temp': MemoryJobStore()}, timezone=pytz.utc)
        self.scheduler.start()

    def connect(self):
        """Connect and register scheduler RPC services, retrying forever."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        """Submit a calendar task, wait for it and record the run.

        Emits a CRITICAL alert when the task does not finish cleanly.
        """
        tid = self.client.call_sync('task.submit_with_env', args[0], args[1:], {
            'RUN_AS_USER': 'root',
            'CALENDAR_TASK_NAME': kwargs.get('name')
        })

        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync('alert.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(
                        kwargs.get('name', tid),
                        result['error']['message']
                    ),
                })
            except RpcException as e:
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        # Relay an event through the dispatcher connection.
        self.client.emit_event(name, params)

    def checkin(self):
        # NOTE(review): presumably signals service readiness — confirm.
        checkin()

    def main(self):
        """Daemon entry point: parse args, wire everything up and serve."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')

        setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.checkin()
        self.client.wait_forever()
示例15: RESTApi
# 需要导入模块: from freenas.dispatcher.client import Client [as 别名]
# 或者: from freenas.dispatcher.client.Client import call_sync [as 别名]
class RESTApi(object):
    """REST gateway: exposes dispatcher RPCs and tasks as a falcon WSGI app."""

    def __init__(self):
        self.logger = logging.getLogger('restd')
        self._cruds = []
        self._threads = []
        self._rpcs = {}
        self._schemas = {}
        self._used_schemas = set()
        self._services = {}
        self._tasks = {}
        self.api = falcon.API(middleware=[
            AuthMiddleware(),
            JSONTranslator(),
        ])
        self.api.add_route('/', SwaggerResource(self))

        # Clean shutdown on Ctrl-C.
        gevent.signal(signal.SIGINT, self.die)

    def init_dispatcher(self):
        """Connect to the dispatcher with automatic reconnection."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.dispatcher = Client()
        self.dispatcher.on_error(on_error)
        self.connect()

    def init_metadata(self):
        """Cache task, schema and per-service RPC-method metadata."""
        self._tasks = self.dispatcher.call_sync('discovery.get_tasks')
        self._schemas = self.dispatcher.call_sync('discovery.get_schema')
        for service in self.dispatcher.call_sync('discovery.get_services'):
            self._services[service] = self.dispatcher.call_sync('discovery.get_methods', service)
            for method in self._services[service]:
                self._rpcs['{0}.{1}'.format(service, method['name'])] = method

    def load_plugins(self):
        """Import and initialize every plugin module under ../plugins."""
        pluginsdir = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'plugins'))
        for i in glob.glob1(pluginsdir, "*.py"):
            try:
                loader = importlib.machinery.SourceFileLoader(i.split('.')[0], os.path.join(pluginsdir, i))
                mod = loader.load_module()
            except:
                # A broken plugin is fatal for the REST daemon.
                self.logger.error('Failed to load plugin %s', i, exc_info=True)
                raise

            mod._init(self)

    def connect(self):
        """Connect/login to the dispatcher, retrying every second forever."""
        while True:
            try:
                self.dispatcher.connect('unix:')
                self.dispatcher.login_service('restd')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def __call__(self, environ, start_response):
        # Behind the reverse proxy (X-Real-IP set), strip the /api/v2.0
        # mount prefix before routing through falcon.
        if 'HTTP_X_REAL_IP' in environ:
            environ['PATH_INFO'] = environ.get('PATH_INFO', '').replace('/api/v2.0', '', 1)
        return self.api.__call__(environ, start_response)

    def register_crud(self, klass):
        # Instantiate a CRUD resource bound to this API and keep it alive.
        ins = klass(self, self.dispatcher)
        self._cruds.append(ins)

    def register_singleitem(self, klass):
        # Single-item resources register their own routes in __init__.
        klass(self, self.dispatcher)

    def register_resource(self, klass):
        klass(self)

    def run(self):
        """Start the WSGI server on port 8889 and block until shutdown."""
        self.init_dispatcher()
        self.init_metadata()
        self.load_plugins()

        server4 = WSGIServer(('', 8889), self, handler_class=RESTWSGIHandler)
        self._threads = [gevent.spawn(server4.serve_forever)]
        # NOTE(review): checkin() presumably signals service readiness.
        checkin()
        gevent.joinall(self._threads)

    def die(self, *args):
        # SIGINT handler: stop worker greenlets and exit.
        gevent.killall(self._threads)
        sys.exit(0)