本文整理汇总了Python中salt.ext.six.iteritems函数的典型用法代码示例。如果您正苦于以下问题:Python iteritems函数的具体用法?Python iteritems怎么用?Python iteritems使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了iteritems函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_usage
def test_usage(self):
    '''
    disk.usage
    '''
    ret = self.run_function('disk.usage')
    # assertIsInstance gives a clear failure message; the original's
    # follow-up ``if not isinstance(...): return`` was dead code because
    # the assertion already raises.
    self.assertIsInstance(ret, dict)
    if salt.utils.is_darwin():
        # macOS ``df`` reports 512-byte blocks and inode statistics.
        expected_fields = ('filesystem', '512-blocks', 'used', 'available',
                           'capacity', 'iused', 'ifree', '%iused')
    else:
        expected_fields = ('filesystem', '1K-blocks', 'used', 'available',
                           'capacity')
    for key, val in six.iteritems(ret):
        for field in expected_fields:
            self.assertIn(field, val)
示例2: extract_state_confs
def extract_state_confs(data, is_extend=False):
    '''
    Walk compiled high-state ``data`` and collect stateconf arguments into
    the module-level STATE_CONF (and STATE_CONF_EXT for ``extend`` blocks).

    data -- mapping of state id -> state declaration dict
    is_extend -- True when recursing into an ``extend`` block
    '''
    for state_id, state_dict in six.iteritems(data):
        if state_id == 'extend' and not is_extend:
            # Recurse into the extend block, then skip it at this level.
            extract_state_confs(state_dict, True)
            continue
        if STATE_NAME in state_dict:
            key = STATE_NAME
        elif STATE_FUNC in state_dict:
            key = STATE_FUNC
        else:
            continue
        to_dict = STATE_CONF_EXT if is_extend else STATE_CONF
        conf = to_dict.setdefault(state_id, Bunch())
        for sdk in state_dict[key]:
            if not isinstance(sdk, dict):
                continue
            # BUGFIX(clarity): the original re-bound ``key`` here, shadowing
            # the name used to index ``state_dict[key]`` above. It only
            # worked because the loop iterable is evaluated once; use a
            # distinct name so the code is not one refactor away from a bug.
            arg_name, arg_val = next(six.iteritems(sdk))
            conf[arg_name] = arg_val
        if not is_extend and state_id in STATE_CONF_EXT:
            extend = STATE_CONF_EXT[state_id]
            # Prepend extended requisites to any already collected.
            for requisite in 'require', 'watch', 'listen':
                if requisite in extend:
                    extend[requisite] += to_dict[state_id].get(requisite, [])
            to_dict[state_id].update(STATE_CONF_EXT[state_id])
示例3: _extract_index
def _extract_index(index_data, global_index=False):
    '''
    Instantiate and return an AllIndex (or GlobalAllIndex) object given a
    valid index configuration.

    index_data -- mapping whose values are lists of single-entry dicts
        describing the index (name, hash/range keys, capacities).
    global_index -- when True, build a GlobalAllIndex carrying throughput.

    Returns None implicitly when the configuration has no name or no keys.
    '''
    parsed_data = {}
    keys = []
    for key, value in six.iteritems(index_data):
        for item in value:
            for field, data in six.iteritems(item):
                if field == 'hash_key':
                    parsed_data['hash_key'] = data
                elif field == 'hash_key_data_type':
                    parsed_data['hash_key_data_type'] = data
                elif field == 'range_key':
                    parsed_data['range_key'] = data
                elif field == 'range_key_data_type':
                    parsed_data['range_key_data_type'] = data
                elif field == 'name':
                    parsed_data['name'] = data
                elif field == 'read_capacity_units':
                    parsed_data['read_capacity_units'] = data
                elif field == 'write_capacity_units':
                    parsed_data['write_capacity_units'] = data
    # BUGFIX: use .get() for the presence checks — any of these fields may
    # be absent from the configuration (e.g. a hash-only index has no
    # 'range_key'), and the original plain lookups raised KeyError.
    if parsed_data.get('hash_key'):
        keys.append(
            HashKey(
                parsed_data['hash_key'],
                data_type=parsed_data['hash_key_data_type']
            )
        )
    if parsed_data.get('range_key'):
        keys.append(
            RangeKey(
                parsed_data['range_key'],
                data_type=parsed_data['range_key_data_type']
            )
        )
    if (global_index and
            parsed_data.get('read_capacity_units') and
            parsed_data.get('write_capacity_units')):
        parsed_data['throughput'] = {
            'read': parsed_data['read_capacity_units'],
            'write': parsed_data['write_capacity_units']
        }
    if parsed_data.get('name') and keys:
        if global_index:
            return GlobalAllIndex(
                parsed_data['name'],
                parts=keys,
                # .get(): throughput is only set when both capacities were
                # supplied; the original raised KeyError otherwise.
                throughput=parsed_data.get('throughput')
            )
        return AllIndex(
            parsed_data['name'],
            parts=keys
        )
示例4: ext_pillar
def ext_pillar(minion_id,
               pillar,  # pylint: disable=W0613
               bucket,
               key=None,
               keyid=None,
               verify_ssl=True,
               location=None,
               multiple_env=False,
               environment='base',
               prefix='',
               service_url=None,
               kms_keyid=None,
               s3_cache_expire=30,  # cache for 30 seconds
               s3_sync_on_update=True):  # sync cache on update rather than jit
    '''
    Compile pillar data from an S3 bucket.

    The bucket contents are mirrored into a local cache directory and the
    cached tree is then compiled with the regular Pillar machinery.

    minion_id -- the minion whose pillar is being compiled
    bucket -- the S3 bucket holding the pillar SLS tree
    key/keyid -- S3 credentials (presumably falls back to IAM/instance
        credentials when None — confirm in S3Credentials)
    multiple_env -- when True, the bucket holds one subtree per saltenv
    prefix -- optional path prefix inside the bucket
    s3_cache_expire -- seconds before cached bucket metadata is refreshed
    s3_sync_on_update -- download files eagerly here instead of on demand
    '''
    s3_creds = S3Credentials(key, keyid, bucket, service_url, verify_ssl,
                             kms_keyid, location)
    # normpath is needed to remove appended '/' if root is empty string.
    pillar_dir = os.path.normpath(os.path.join(_get_cache_dir(), environment,
                                               bucket))
    if prefix:
        pillar_dir = os.path.normpath(os.path.join(pillar_dir, prefix))
    # If the cache dir is already the configured pillar root for this env,
    # compiling it again would recurse — bail out with no extra data.
    if __opts__['pillar_roots'].get(environment, []) == [pillar_dir]:
        return {}
    # Fetch (possibly cached) metadata describing the bucket contents.
    metadata = _init(s3_creds, bucket, multiple_env, environment, prefix, s3_cache_expire)
    if s3_sync_on_update:
        # sync the buckets to the local cache
        log.info('Syncing local pillar cache from S3...')
        for saltenv, env_meta in six.iteritems(metadata):
            for bucket, files in six.iteritems(_find_files(env_meta)):
                for file_path in files:
                    cached_file_path = _get_cached_file_name(bucket, saltenv,
                                                             file_path)
                    log.info('{0} - {1} : {2}'.format(bucket, saltenv,
                                                      file_path))
                    # load the file from S3 if not in the cache or too old
                    _get_file_from_s3(s3_creds, metadata, saltenv, bucket,
                                      file_path, cached_file_path)
        log.info('Sync local pillar cache from S3 completed.')
    # Compile the cached tree with a copy of the master opts pointing the
    # pillar root at the cache directory.
    opts = deepcopy(__opts__)
    opts['pillar_roots'][environment] = [os.path.join(pillar_dir, environment)] if multiple_env else [pillar_dir]
    # Avoid recursively re-adding this same pillar
    opts['ext_pillar'] = [x for x in opts['ext_pillar'] if 's3' not in x]
    pil = Pillar(opts, __grains__, minion_id, environment)
    compiled_pillar = pil.compile_pillar()
    return compiled_pillar
示例5: top_matches
def top_matches(self, top):
    '''
    Search through the top high data for matches and return the states
    that this minion needs to execute.
    Returns:
        {'saltenv': ['state1', 'state2', ...]}
    '''
    matches = {}
    pillarenv = self.opts['pillarenv']
    nodegroups = self.opts.get('nodegroups', {})
    for saltenv, body in six.iteritems(top):
        # When a pillarenv is pinned, ignore every other environment.
        if pillarenv and saltenv != pillarenv:
            continue
        for match, data in six.iteritems(body):
            if not self.matcher.confirm_top(
                match,
                data,
                nodegroups,
            ):
                continue
            env_matches = matches.setdefault(saltenv, [])
            for item in data:
                if isinstance(item, six.string_types) and item not in env_matches:
                    env_matches.append(item)
    return matches
示例6: get_cloud_init_mime
def get_cloud_init_mime(cloud_init):
    '''
    Get a mime multipart encoded string from a cloud-init dict. Currently
    supports scripts and cloud-config.
    CLI Example:
    .. code-block:: bash
        salt myminion boto.get_cloud_init_mime <cloud init>
    '''
    # Accept a JSON string as well as an already-parsed dict.
    if isinstance(cloud_init, six.string_types):
        cloud_init = json.loads(cloud_init)
    mime_msg = email.mime.multipart.MIMEMultipart()
    # Each supported section maps to the MIME subtype cloud-init expects;
    # order matches the original: boothooks, then scripts, then config.
    for section, subtype in (('boothooks', 'cloud-boothook'),
                             ('scripts', 'x-shellscript')):
        if section in cloud_init:
            for _name, script in six.iteritems(cloud_init[section]):
                mime_msg.attach(email.mime.text.MIMEText(script, subtype))
    if 'cloud-config' in cloud_init:
        mime_msg.attach(
            email.mime.text.MIMEText(_safe_dump(cloud_init['cloud-config']),
                                     'cloud-config'))
    return mime_msg.as_string()
示例7: get_enabled
def get_enabled():
    '''
    Return a list of all enabled services
    CLI Example:
    .. code-block:: bash
        salt '*' service.get_enabled
    '''
    enabled = []
    unit_files = _get_all_unit_files()
    legacy_units = _get_all_units()
    # Native systemd unit files: include every one marked 'enabled'.
    for unit, unit_state in six.iteritems(unit_files):
        if unit_state == 'enabled':
            enabled.append(unit)
    # Legacy sysv services not already covered by a unit file.
    for unit in legacy_units:
        if unit in unit_files:
            continue
        # Performance: skip the systemd query entirely when the legacy
        # initscript does not exist.
        if _service_is_sysv(unit) and _sysv_is_enabled(unit):
            enabled.append(unit)
    return sorted(enabled)
示例8: top
def top(num_processes=5, interval=3):
    '''
    Return a list of top CPU consuming processes during the interval.
    num_processes = return the top N CPU consuming processes
    interval = the number of seconds to sample CPU usage over
    CLI Examples:
    .. code-block:: bash
        salt '*' ps.top
        salt '*' ps.top 5 10
    '''
    result = []
    start_usage = {}
    # First pass: record each process's cumulative CPU time.
    for pid in psutil.pids():
        try:
            process = psutil.Process(pid)
            user, system = process.cpu_times()
        except ValueError:
            # Some platforms return extra fields from cpu_times().
            user, system, _, _ = process.cpu_times()
        except psutil.NoSuchProcess:
            # Process exited between pids() and Process(); skip it.
            continue
        start_usage[process] = user + system
    time.sleep(interval)
    # Second pass: CPU time consumed by each survivor during the interval.
    usage = set()
    for process, start in six.iteritems(start_usage):
        try:
            user, system = process.cpu_times()
        except ValueError:
            user, system, _, _ = process.cpu_times()
        except psutil.NoSuchProcess:
            continue
        usage.add((user + system - start, process))
    # BUGFIX: sort on the CPU delta only. The original reversed(sorted(usage))
    # fell through to comparing the Process objects on equal deltas, which
    # raises TypeError on Python 3 (Process is not orderable).
    for idx, (diff, process) in enumerate(
            sorted(usage, key=lambda entry: entry[0], reverse=True)):
        if num_processes and idx >= num_processes:
            break
        # Fall back to the process name when no cmdline is available
        # (kernel threads, zombies); also avoids calling the helper twice.
        cmdline = _get_proc_cmdline(process) or _get_proc_name(process)
        info = {'cmd': cmdline,
                'user': _get_proc_username(process),
                'status': _get_proc_status(process),
                'pid': _get_proc_pid(process),
                'create_time': _get_proc_create_time(process),
                'cpu': {},
                'mem': {},
                }
        for key, value in six.iteritems(process.cpu_times()._asdict()):
            info['cpu'][key] = value
        for key, value in six.iteritems(process.memory_info()._asdict()):
            info['mem'][key] = value
        result.append(info)
    return result
示例9: ext_pillar
def ext_pillar(minion_id, repo, pillar_dirs):
    '''
    Checkout the ext_pillar sources and compile the resulting pillar SLS
    '''
    # Legacy configuration passes the repo as a single string.
    if isinstance(repo, six.string_types):
        return _legacy_git_pillar(minion_id, repo, pillar_dirs)
    opts = copy.deepcopy(__opts__)
    opts['pillar_roots'] = {}
    pillar = salt.utils.gitfs.GitPillar(opts)
    pillar.init_remotes(repo, PER_REMOTE_OVERRIDES)
    pillar.checkout()
    merge_strategy = __opts__.get(
        'pillar_source_merging_strategy',
        'smart'
    )
    ret = {}
    for pillar_dir, env in six.iteritems(pillar.pillar_dirs):
        log.debug(
            'git_pillar is processing pillar SLS from {0} for pillar '
            'env \'{1}\''.format(pillar_dir, env)
        )
        # Point pillar_roots at every checkout that maps to this env.
        opts['pillar_roots'] = {
            env: [d for (d, e) in six.iteritems(pillar.pillar_dirs)
                  if e == env]
        }
        local_pillar = Pillar(opts, __grains__, minion_id, env)
        ret = salt.utils.dictupdate.merge(
            ret,
            local_pillar.compile_pillar(ext=False),
            strategy=merge_strategy
        )
    return ret
示例10: change
def change(connect_spec, dn, before, after):
    """Modify an entry in an LDAP database.
    This does the same thing as :py:func:`modify`, but with a simpler
    interface. Instead of taking a list of directives, it takes a
    before and after view of an entry, determines the differences
    between the two, computes the directives, and executes them.
    Any attribute value present in ``before`` but missing in ``after``
    is deleted. Any attribute value present in ``after`` but missing
    in ``before`` is added. Any attribute value in the database that
    is not mentioned in either ``before`` or ``after`` is not altered.
    Any attribute value that is present in both ``before`` and
    ``after`` is ignored, regardless of whether that attribute value
    exists in the database.
    :param connect_spec:
        See the documentation for the ``connect_spec`` parameter for
        :py:func:`connect`.
    :param dn:
        Distinguished name of the entry.
    :param before:
        The expected state of the entry before modification. This is
        a dict mapping each attribute name to an iterable of values.
    :param after:
        The desired state of the entry after modification. This is a
        dict mapping each attribute name to an iterable of values.
    :returns:
        ``True`` if successful, raises an exception otherwise.
    CLI example:
    .. code-block:: bash
        salt '*' ldap3.change "{
            'url': 'ldaps://ldap.example.com/',
            'bind': {
                'method': 'simple',
                'password': 'secret'}
        }" dn='cn=admin,dc=example,dc=com'
        before="{'example_value': 'before_val'}"
        after="{'example_value': 'after_val'}"
    """
    connection = connect(connect_spec)
    # Copy the caller's dicts with the value iterables coerced to lists,
    # both because modifyModlist() may expect lists and so the caller's
    # data is never mutated.
    before = {attr: list(vals) for attr, vals in six.iteritems(before)}
    after = {attr: list(vals) for attr, vals in six.iteritems(after)}
    modlist = ldap.modlist.modifyModlist(before, after)
    try:
        connection.c.modify_s(dn, modlist)
    except ldap.LDAPError as e:
        _convert_exception(e)
    return True
示例11: _create_temp_structure
def _create_temp_structure(self, temp_directory, structure):
    '''
    Materialize ``structure`` (a mapping of folder -> {filename: content})
    as real directories and files under ``temp_directory``.
    '''
    for folder, files in six.iteritems(structure):
        dir_path = os.path.join(temp_directory, folder)
        os.makedirs(dir_path)
        for file_name, contents in six.iteritems(files):
            file_path = os.path.join(dir_path, file_name)
            with salt.utils.fopen(file_path, "w+") as handle:
                handle.write(contents)
示例12: _parse_network_settings
def _parse_network_settings(opts, current):
    '''
    Filters given options and outputs valid settings for
    the global network settings file.

    opts -- requested settings (keys are case-insensitive)
    current -- settings currently in the network file, used as defaults
    '''
    # Normalize keys
    opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts))
    current = dict((k.lower(), v) for (k, v) in six.iteritems(current))
    # Check for supported parameters
    retain_settings = opts.get('retain_settings', False)
    result = current if retain_settings else {}
    valid = _CONFIG_TRUE + _CONFIG_FALSE
    if 'enabled' not in opts:
        try:
            opts['networking'] = current['networking']
            _log_default_network('networking', current['networking'])
        except KeyError:
            # BUGFIX: a missing 'networking' key raises KeyError, not
            # ValueError — the original clause could never fire and the
            # KeyError escaped to the caller.
            _raise_error_network('networking', valid)
    else:
        opts['networking'] = opts['enabled']
    if opts['networking'] in valid:
        if opts['networking'] in _CONFIG_TRUE:
            result['networking'] = 'yes'
        elif opts['networking'] in _CONFIG_FALSE:
            result['networking'] = 'no'
    else:
        _raise_error_network('networking', valid)
    if 'hostname' not in opts:
        try:
            opts['hostname'] = current['hostname']
            _log_default_network('hostname', current['hostname'])
        except Exception:
            _raise_error_network('hostname', ['server1.example.com'])
    if opts['hostname']:
        result['hostname'] = opts['hostname']
    else:
        _raise_error_network('hostname', ['server1.example.com'])
    if 'nozeroconf' in opts:
        if opts['nozeroconf'] in valid:
            if opts['nozeroconf'] in _CONFIG_TRUE:
                result['nozeroconf'] = 'true'
            elif opts['nozeroconf'] in _CONFIG_FALSE:
                result['nozeroconf'] = 'false'
        else:
            _raise_error_network('nozeroconf', valid)
    # Pass any remaining options through unchanged.
    for opt in opts:
        if opt not in ['networking', 'hostname', 'nozeroconf']:
            result[opt] = opts[opt]
    return result
示例13: statelist
def statelist(states_dict, sid_excludes=frozenset(['include', 'exclude'])):
    '''
    Yield (sid, states, sname, args) for every state declaration in
    ``states_dict``, skipping dunder-prefixed keys and excluded state ids.
    '''
    for sid, states in six.iteritems(states_dict):
        if sid.startswith('__') or sid in sid_excludes:
            continue
        for sname, args in six.iteritems(states):
            if not sname.startswith('__'):
                yield sid, states, sname, args
示例14: create_launch_configuration
def create_launch_configuration(name, image_id, key_name=None,
                                security_groups=None, user_data=None,
                                instance_type='m1.small', kernel_id=None,
                                ramdisk_id=None, block_device_mappings=None,
                                instance_monitoring=False, spot_price=None,
                                instance_profile_name=None,
                                ebs_optimized=False,
                                associate_public_ip_address=None,
                                volume_type=None, delete_on_termination=True,
                                iops=None, use_block_device_types=False,
                                region=None, key=None, keyid=None,
                                profile=None):
    '''
    Create a launch configuration.
    CLI example::
        salt myminion boto_asg.create_launch_configuration mylc image_id=ami-0b9c9f62 key_name='mykey' security_groups='["mygroup"]' instance_type='c3.2xlarge'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    # CLI callers pass these as JSON strings; decode to native structures.
    if isinstance(security_groups, six.string_types):
        security_groups = json.loads(security_groups)
    if isinstance(block_device_mappings, six.string_types):
        block_device_mappings = json.loads(block_device_mappings)
    _bdms = []
    if block_device_mappings:
        # Boto requires objects for the mappings and the devices.
        device_map = blockdevicemapping.BlockDeviceMapping()
        for mapping in block_device_mappings:
            for device_name, attributes in six.iteritems(mapping):
                ebs_device = blockdevicemapping.EBSBlockDeviceType()
                for attr_name, attr_value in six.iteritems(attributes):
                    setattr(ebs_device, attr_name, attr_value)
                device_map[device_name] = ebs_device
        _bdms = [device_map]
    lc = autoscale.LaunchConfiguration(
        name=name, image_id=image_id, key_name=key_name,
        security_groups=security_groups, user_data=user_data,
        instance_type=instance_type, kernel_id=kernel_id,
        ramdisk_id=ramdisk_id, block_device_mappings=_bdms,
        instance_monitoring=instance_monitoring, spot_price=spot_price,
        instance_profile_name=instance_profile_name,
        ebs_optimized=ebs_optimized,
        associate_public_ip_address=associate_public_ip_address,
        volume_type=volume_type, delete_on_termination=delete_on_termination,
        iops=iops, use_block_device_types=use_block_device_types)
    try:
        conn.create_launch_configuration(lc)
        log.info('Created LC {0}'.format(name))
        return True
    except boto.exception.BotoServerError as e:
        log.debug(e)
        msg = 'Failed to create LC {0}'.format(name)
        log.error(msg)
        return False
示例15: safe_accept
def safe_accept(target, tgt_type='glob', expr_form=None):
    '''
    .. versionchanged:: Nitrogen
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.
    Accept a minion's public key after checking the fingerprint over salt-ssh
    CLI Example:
    .. code-block:: bash
        salt-run manage.safe_accept my_minion
        salt-run manage.safe_accept minion1,minion2 tgt_type=list
    '''
    salt_key = salt.key.Key(__opts__)
    ssh_client = salt.client.ssh.client.SSHClient()
    ret = ssh_client.cmd(target, 'key.finger', tgt_type=tgt_type)
    failures = {}
    # BUGFIX: iterate over a snapshot — entries are deleted from ``ret``
    # inside the loop, and mutating a dict while iterating it raises
    # RuntimeError on Python 3.
    for minion, finger in list(six.iteritems(ret)):
        if not FINGERPRINT_REGEX.match(finger):
            failures[minion] = finger
        else:
            fingerprints = salt_key.finger(minion)
            accepted = fingerprints.get('minions', {})
            pending = fingerprints.get('minions_pre', {})
            if minion in accepted:
                # Already accepted; nothing to do.
                del ret[minion]
                continue
            elif minion not in pending:
                failures[minion] = ("Minion key {0} not found by salt-key"
                                    .format(minion))
            elif pending[minion] != finger:
                failures[minion] = ("Minion key {0} does not match the key in "
                                    "salt-key: {1}"
                                    .format(finger, pending[minion]))
            else:
                # Fingerprint verified over salt-ssh; accept the key.
                subprocess.call(["salt-key", "-qya", minion])
        if minion in failures:
            del ret[minion]
    if failures:
        print('safe_accept failed on the following minions:')
        for minion, message in six.iteritems(failures):
            print(minion)
            print('-' * len(minion))
            print(message)
            print('')
    __jid_event__.fire_event({'message': 'Accepted {0:d} keys'.format(len(ret))}, 'progress')
    return ret, failures