本文整理汇总了Python中salt.utils.odict.OrderedDict.fromkeys方法的典型用法代码示例。如果您正苦于以下问题:Python OrderedDict.fromkeys方法的具体用法?Python OrderedDict.fromkeys怎么用?Python OrderedDict.fromkeys使用的例子?那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类salt.utils.odict.OrderedDict的用法示例。
在下文中一共展示了OrderedDict.fromkeys方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: compile
# 需要导入模块: from salt.utils.odict import OrderedDict [as 别名]
# 或者: from salt.utils.odict.OrderedDict import fromkeys [as 别名]
def compile(labels, **patterns):  # pylint: disable=W0622
    '''
    Compile the per-label patterns into a single multiline regex.

    Args:
        labels: iterable of field names. One alternation group is built
            per label, and the groups are joined with a literal ``\\n``
            between labels.
        **patterns: pattern value(s) keyed by label name. Values may be a
            string, a list of strings, or a precompiled regex (its
            ``.pattern`` text is used). Special keys:

            - ``_pattern``: a prebuilt pattern; returned as-is if truthy.
            - ``_regex``: when truthy, values are treated as regex text
              and are not escaped (unless listed in ``_escape``).
            - ``_escape``: labels whose values are escaped even when
              ``_regex`` is set.

    Returns:
        A compiled ``re`` pattern (``re.MULTILINE | re.DOTALL``), the
        prebuilt ``_pattern`` if one was supplied, or ``None`` when
        either ``labels`` or the remaining patterns are empty.
    '''
    pattern = patterns.pop('_pattern', None)
    if pattern:
        return pattern
    regex = patterns.pop('_regex', False)
    # Normalize a falsy _escape value (e.g. None) to an empty list.
    escape = patterns.pop('_escape', []) or []
    if not patterns or not labels:
        return None
    # Drop patterns for fields that are not part of the label set.
    for key in list(patterns):
        if key not in labels:
            patterns.pop(key)
    default_pattern = get_default_pattern(regex)
    _escape_text = functools.partial(escape_text, regex=regex)
    # re.Pattern is public as of Python 3.8, which also removed the
    # private re._pattern_type this code originally relied on; fall back
    # for older interpreters.
    pattern_type = getattr(re, 'Pattern', None) or re._pattern_type  # pylint: disable=W0212
    # Set default values and join the patterns for each field.
    pattern = OrderedDict.fromkeys(labels, None)
    for label in labels:
        if label in patterns and patterns[label]:
            field = patterns[label]
            if isinstance(field, pattern_type):
                field = [field.pattern]
            if isinstance(field, six.string_types):
                field = [field]
            if label in escape or not regex:
                field = [_escape_text(text) for text in field]
        else:
            field = default_pattern
        pattern[label] = r'(?:{0})'.format(r'|'.join(field))
    # The original wrapped this in `try/except NameError: raise`, which
    # re-raised unchanged -- a no-op handler, removed.
    return re.compile(
        r'\n'.join(six.itervalues(pattern)), re.MULTILINE | re.DOTALL
    )
示例2: handle_func
# 需要导入模块: from salt.utils.odict import OrderedDict [as 别名]
# 或者: from salt.utils.odict.OrderedDict import fromkeys [as 别名]
def handle_func(self, func, data):
'''
Execute this method in a multiprocess or thread
'''
if salt.utils.is_windows():
self.functions = salt.loader.minion_mods(self.opts)
self.returners = salt.loader.returners(self.opts, self.functions)
ret = {'id': self.opts.get('id', 'master'),
'fun': func,
'jid': '{0:%Y%m%d%H%M%S%f}'.format(datetime.datetime.now())}
proc_fn = os.path.join(
salt.minion.get_proc_dir(self.opts['cachedir']),
ret['jid']
)
# Check to see if there are other jobs with this
# signature running. If there are more than maxrunning
# jobs present then don't start another.
# If jid_include is False for this job we can ignore all this
# NOTE--jid_include defaults to True, thus if it is missing from the data
# dict we treat it like it was there and is True
if 'jid_include' not in data or data['jid_include']:
jobcount = 0
for basefilename in os.listdir(salt.minion.get_proc_dir(self.opts['cachedir'])):
fn = os.path.join(salt.minion.get_proc_dir(self.opts['cachedir']), basefilename)
with salt.utils.fopen(fn, 'r') as fp_:
job = salt.payload.Serial(self.opts).load(fp_)
log.debug('schedule.handle_func: Checking job against '
'fun {0}: {1}'.format(ret['fun'], job))
if ret['fun'] == job['fun'] and os_is_running(job['pid']):
jobcount += 1
log.debug(
'schedule.handle_func: Incrementing jobcount, now '
'{0}, maxrunning is {1}'.format(
jobcount, data['maxrunning']))
if jobcount >= data['maxrunning']:
log.debug(
'schedule.handle_func: The scheduled job {0} '
'was not started, {1} already running'.format(
func, data['maxrunning']))
return False
salt.utils.daemonize_if(self.opts)
ret['pid'] = os.getpid()
if 'jid_include' not in data or data['jid_include']:
log.debug('schedule.handle_func: adding this job to the jobcache '
'with data {0}'.format(ret))
# write this to /var/cache/salt/minion/proc
with salt.utils.fopen(proc_fn, 'w+') as fp_:
fp_.write(salt.payload.Serial(self.opts).dumps(ret))
args = None
if 'args' in data:
args = data['args']
kwargs = None
if 'kwargs' in data:
kwargs = data['kwargs']
try:
if args and kwargs:
ret['return'] = self.functions[func](*args, **kwargs)
if args and not kwargs:
ret['return'] = self.functions[func](*args)
if kwargs and not args:
ret['return'] = self.functions[func](**kwargs)
if not kwargs and not args:
ret['return'] = self.functions[func]()
data_returner = data.get('returner', None)
if data_returner or self.schedule_returner:
rets = []
for returner in [data_returner, self.schedule_returner]:
if isinstance(returner, str):
rets.append(returner)
elif isinstance(returner, list):
rets.extend(returner)
# simple de-duplication with order retained
rets = OrderedDict.fromkeys(rets).keys()
for returner in rets:
ret_str = '{0}.returner'.format(returner)
if ret_str in self.returners:
ret['success'] = True
self.returners[ret_str](ret)
else:
log.info(
'Job {0} using invalid returner: {1} Ignoring.'.format(
func, returner
)
)
except Exception:
log.exception("Unhandled exception running {0}".format(ret['fun']))
# Although catch-all exception handlers are bad, the exception here
# is to let the exception bubble up to the top of the thread context,
#.........这里部分代码省略.........
示例3: handle_func
# 需要导入模块: from salt.utils.odict import OrderedDict [as 别名]
# 或者: from salt.utils.odict.OrderedDict import fromkeys [as 别名]
#.........这里部分代码省略.........
jobcount += 1
log.debug(
'schedule.handle_func: Incrementing jobcount, now '
'{0}, maxrunning is {1}'.format(
jobcount, data['maxrunning']))
if jobcount >= data['maxrunning']:
log.debug(
'schedule.handle_func: The scheduled job {0} '
'was not started, {1} already running'.format(
ret['schedule'], data['maxrunning']))
return False
else:
try:
log.info('Invalid job file found. Removing.')
os.remove(fn_)
except OSError:
log.info('Unable to remove file: {0}.'.format(fn_))
salt.utils.daemonize_if(self.opts)
ret['pid'] = os.getpid()
if 'jid_include' not in data or data['jid_include']:
log.debug('schedule.handle_func: adding this job to the jobcache '
'with data {0}'.format(ret))
# write this to /var/cache/salt/minion/proc
with salt.utils.fopen(proc_fn, 'wb+') as fp_:
fp_.write(salt.payload.Serial(self.opts).dumps(ret))
args = tuple()
if 'args' in data:
args = data['args']
kwargs = {}
if 'kwargs' in data:
kwargs = data['kwargs']
# if the func support **kwargs, lets pack in the pub data we have
# TODO: pack the *same* pub data as a minion?
argspec = salt.utils.args.get_function_argspec(self.functions[func])
if argspec.keywords:
# this function accepts **kwargs, pack in the publish data
for key, val in ret.iteritems():
kwargs['__pub_{0}'.format(key)] = val
try:
ret['return'] = self.functions[func](*args, **kwargs)
data_returner = data.get('returner', None)
if data_returner or self.schedule_returner:
if 'returner_config' in data:
ret['ret_config'] = data['returner_config']
rets = []
for returner in [data_returner, self.schedule_returner]:
if isinstance(returner, str):
rets.append(returner)
elif isinstance(returner, list):
rets.extend(returner)
# simple de-duplication with order retained
for returner in OrderedDict.fromkeys(rets):
ret_str = '{0}.returner'.format(returner)
if ret_str in self.returners:
ret['success'] = True
self.returners[ret_str](ret)
else:
log.info(
'Job {0} using invalid returner: {1}. Ignoring.'.format(
func, returner
)
)
if 'return_job' in data and not data['return_job']:
pass
else:
# Send back to master so the job is included in the job list
mret = ret.copy()
mret['jid'] = 'req'
channel = salt.transport.Channel.factory(self.opts, usage='salt_schedule')
load = {'cmd': '_return', 'id': self.opts['id']}
for key, value in mret.items():
load[key] = value
channel.send(load)
except Exception:
log.exception("Unhandled exception running {0}".format(ret['fun']))
# Although catch-all exception handlers are bad, the exception here
# is to let the exception bubble up to the top of the thread context,
# where the thread will die silently, which is worse.
finally:
try:
os.unlink(proc_fn)
except OSError as exc:
if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
# EEXIST and ENOENT are OK because the file is gone and that's what
# we wanted
pass
else:
log.error("Failed to delete '{0}': {1}".format(proc_fn, exc.errno))
# Otherwise, failing to delete this file is not something
# we can cleanly handle.
raise
示例4: handle_func
# 需要导入模块: from salt.utils.odict import OrderedDict [as 别名]
# 或者: from salt.utils.odict.OrderedDict import fromkeys [as 别名]
#.........这里部分代码省略.........
)
if ret["schedule"] == job["schedule"] and os_is_running(job["pid"]):
jobcount += 1
log.debug(
"schedule.handle_func: Incrementing jobcount, now "
"{0}, maxrunning is {1}".format(jobcount, data["maxrunning"])
)
if jobcount >= data["maxrunning"]:
log.debug(
"schedule.handle_func: The scheduled job {0} "
"was not started, {1} already running".format(
ret["schedule"], data["maxrunning"]
)
)
return False
else:
try:
log.info("Invalid job file found. Removing.")
os.remove(fn_)
except OSError:
log.info("Unable to remove file: {0}.".format(fn_))
salt.utils.daemonize_if(self.opts)
ret["pid"] = os.getpid()
if "jid_include" not in data or data["jid_include"]:
log.debug("schedule.handle_func: adding this job to the jobcache " "with data {0}".format(ret))
# write this to /var/cache/salt/minion/proc
with salt.utils.fopen(proc_fn, "w+b") as fp_:
fp_.write(salt.payload.Serial(self.opts).dumps(ret))
args = tuple()
if "args" in data:
args = data["args"]
kwargs = {}
if "kwargs" in data:
kwargs = data["kwargs"]
# if the func support **kwargs, lets pack in the pub data we have
# TODO: pack the *same* pub data as a minion?
argspec = salt.utils.args.get_function_argspec(self.functions[func])
if argspec.keywords:
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(ret):
kwargs["__pub_{0}".format(key)] = val
try:
ret["return"] = self.functions[func](*args, **kwargs)
data_returner = data.get("returner", None)
if data_returner or self.schedule_returner:
if "returner_config" in data:
ret["ret_config"] = data["returner_config"]
rets = []
for returner in [data_returner, self.schedule_returner]:
if isinstance(returner, str):
rets.append(returner)
elif isinstance(returner, list):
rets.extend(returner)
# simple de-duplication with order retained
for returner in OrderedDict.fromkeys(rets):
ret_str = "{0}.returner".format(returner)
if ret_str in self.returners:
ret["success"] = True
self.returners[ret_str](ret)
else:
log.info("Job {0} using invalid returner: {1}. Ignoring.".format(func, returner))
if "return_job" in data and not data["return_job"]:
pass
else:
# Send back to master so the job is included in the job list
mret = ret.copy()
mret["jid"] = "req"
channel = salt.transport.Channel.factory(self.opts, usage="salt_schedule")
load = {"cmd": "_return", "id": self.opts["id"]}
for key, value in six.iteritems(mret):
load[key] = value
channel.send(load)
except Exception:
log.exception("Unhandled exception running {0}".format(ret["fun"]))
# Although catch-all exception handlers are bad, the exception here
# is to let the exception bubble up to the top of the thread context,
# where the thread will die silently, which is worse.
finally:
try:
log.debug("schedule.handle_func: Removing {0}".format(proc_fn))
os.unlink(proc_fn)
except OSError as exc:
if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
# EEXIST and ENOENT are OK because the file is gone and that's what
# we wanted
pass
else:
log.error("Failed to delete '{0}': {1}".format(proc_fn, exc.errno))
# Otherwise, failing to delete this file is not something
# we can cleanly handle.
raise