This article collects typical code examples of the six.iterkeys function in Python. If you are wondering what iterkeys does, how to call it, or what real-world usage looks like, the curated examples here should help.
Below are 15 code examples of the iterkeys function, ordered by popularity by default.
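Before the collected examples, a minimal self-contained sketch of what six.iterkeys does: on Python 2 it delegates to d.iterkeys(), and on Python 3 it returns iter(d.keys()), so either way you get a lazy iterator over the keys rather than a list. The dictionary here is invented for illustration:

import six

d = {'a': 1, 'b': 2, 'c': 3}

# Iterate lazily over the keys; works the same on Python 2 and 3.
for key in six.iterkeys(d):
    print(key, d[key])

# The result is an iterator, not a list; materialize it when needed.
assert sorted(six.iterkeys(d)) == ['a', 'b', 'c']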
Example 1: _get_assignments_in_maxima
def _get_assignments_in_maxima(assignments, prefix=""):
    my_variable_names = set(six.iterkeys(assignments))
    written_assignments = set()

    prefix_subst_dict = dict(
        (vn, prefix + vn) for vn in my_variable_names)

    from pymbolic.maxima import MaximaStringifyMapper
    mstr = MaximaStringifyMapper()
    s2p = SympyToPymbolicMapper()
    dkill = _DerivativeKiller()

    result = []

    def write_assignment(name):
        # Emit dependencies first, so each variable is defined before use.
        symbols = [atm for atm in assignments[name].atoms()
                   if isinstance(atm, sym.Symbol)
                   and atm.name in my_variable_names]

        for symb in symbols:
            if symb.name not in written_assignments:
                write_assignment(symb.name)

        result.append("%s%s : %s;" % (
            prefix, name, mstr(dkill(s2p(
                assignments[name].subs(prefix_subst_dict))))))
        written_assignments.add(name)

    for name in six.iterkeys(assignments):
        if name not in written_assignments:
            write_assignment(name)

    return "\n".join(result)
Example 2: test_max_scores_number
def test_max_scores_number(self):
    """Test max score is correct when groups are number-defined."""
    s1, s2, s3 = 10.5, 30.5, 59
    parameters = [[s1, 2, 10], [s2, 2, 20], [s3, 2, 30]]
    header = ["Subtask 1 (10.5)", "Subtask 2 (30.5)", "Subtask 3 (59)"]

    # Only group 1_* is public.
    public_testcases = dict(self._public_testcases)
    self.assertEqual(
        GroupThreshold(parameters, public_testcases).max_scores(),
        (s1 + s2 + s3, s1, header))

    # All groups are public.
    for testcase in iterkeys(public_testcases):
        public_testcases[testcase] = True
    self.assertEqual(
        GroupThreshold(parameters, public_testcases).max_scores(),
        (s1 + s2 + s3, s1 + s2 + s3, header))

    # No groups are public.
    for testcase in iterkeys(public_testcases):
        public_testcases[testcase] = False
    self.assertEqual(
        GroupThreshold(parameters, public_testcases).max_scores(),
        (s1 + s2 + s3, 0, header))
Example 3: format_resource_attributes
def format_resource_attributes(resource, with_attr=None):
    resolver = resource.attributes
    if not with_attr:
        with_attr = []

    def resolve(attr, resolver):
        try:
            return resolver._resolver(attr)
        except Exception:
            return None

    # If 'show' is in the attribute schema, resolve all attributes of the
    # resource, including ones not represented in the response of the show
    # API (such as 'console_urls' for a nova server); the user can view
    # them by passing the with_attr parameter.
    if 'show' in six.iterkeys(resolver):
        show_attr = resolve('show', resolver)
        # Check that 'show' resolved to a dictionary, i.e. that it is
        # not None.
        if isinstance(show_attr, collections.Mapping):
            for a in with_attr:
                if a not in show_attr:
                    show_attr[a] = resolve(a, resolver)
            return show_attr
        else:
            # Remove the 'show' attribute if it is None or not a mapping,
            # then resolve all attributes manually.
            del resolver._attributes['show']

    attributes = set(list(six.iterkeys(resolver)) + with_attr)
    return dict((attr, resolve(attr, resolver))
                for attr in attributes)
Example 4: test_parametric_function_api
def test_parametric_function_api():
    """
    Testing :function:`nnabla.parametric_functions.parametric_function_api`.
    """
    import nnabla as nn
    import inspect
    nn.clear_parameters()
    shape = (2, 3, 4)

    # Signature check
    spec = inspect.getargspec(dummy_parametric_function)
    assert spec.args == ['shape', 'f', 'i', 's', 'name']
    assert spec.defaults == (10, 1, 'dummy', None)
    assert dummy_parametric_function.__doc__.splitlines()[0] == 'Doc'

    # Verify that two different ways of scoping do the same thing.
    # Using the name argument:
    v = dummy_parametric_function(shape, name='group1')
    # Using parameter_scope:
    with nn.parameter_scope('group1'):
        v = dummy_parametric_function(shape)
    params = nn.get_parameters()
    assert len(params) == 2
    assert list(iterkeys(params)) == ['group1/dummy/p1', 'group1/dummy/p2']

    # No scope
    v = dummy_parametric_function(shape)
    params = nn.get_parameters()
    assert len(params) == 4
    assert list(iterkeys(params)) == ['group1/dummy/p1', 'group1/dummy/p2',
                                      'dummy/p1', 'dummy/p2']
    nn.clear_parameters()
Example 5: _is_scaling_allowed
def _is_scaling_allowed(self):
    metadata = self.metadata_get()
    if metadata.get('scaling_in_progress'):
        return False
    try:
        # Negative values don't make sense, so they are clamped to zero.
        cooldown = max(0, self.properties[self.COOLDOWN])
    except TypeError:
        # If not specified, it will be None; same as cooldown == 0.
        cooldown = 0

    if cooldown != 0:
        try:
            if 'cooldown' not in metadata:
                # Note: this is for supporting the old-version cooldown logic.
                if metadata:
                    last_adjust = next(six.iterkeys(metadata))
                    if not timeutils.is_older_than(last_adjust, cooldown):
                        return False
            else:
                last_adjust = next(six.iterkeys(metadata['cooldown']))
                if not timeutils.is_older_than(last_adjust, cooldown):
                    return False
        except ValueError:
            # Occurs when metadata has only {scaling_in_progress: False}.
            pass

    # Assumes _finished_scaling is called
    # after the scaling operation completes.
    metadata['scaling_in_progress'] = True
    self.metadata_set(metadata)
    return True
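For reference, a minimal standalone sketch of the same cooldown gate using only the standard library; the function name and the ISO timestamp format are assumptions for illustration, not Heat's actual metadata layout:

import datetime

def cooldown_elapsed(last_adjust_iso, cooldown_seconds):
    # True if the last adjustment happened longer ago than the cooldown
    # window, mirroring timeutils.is_older_than(last_adjust, cooldown).
    last = datetime.datetime.strptime(last_adjust_iso, '%Y-%m-%dT%H:%M:%S')
    age = datetime.datetime.utcnow() - last
    return age > datetime.timedelta(seconds=cooldown_seconds)

# A timestamp far in the past has certainly aged past a 60-second cooldown.
assert cooldown_elapsed('2016-01-01T00:00:00', 60)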
Example 6: _sequence_like
def _sequence_like(instance, args):
    """Converts the sequence `args` to the same type as `instance`.

    Args:
        instance: an instance of `tuple`, `list`, `namedtuple`, `dict`, or
            `collections.OrderedDict`.
        args: elements to be converted to a sequence.

    Returns:
        `args` with the type of `instance`.
    """
    if isinstance(instance, dict):
        # For dictionaries with their values extracted, we always order the
        # values by sorting the keys first (see note below). This code allows
        # recreating e.g. `OrderedDict`s with their original key ordering.
        result = dict(zip(sorted(_six.iterkeys(instance)), args))
        return type(instance)((key, result[key])
                              for key in _six.iterkeys(instance))
    elif (isinstance(instance, tuple) and
          hasattr(instance, "_fields") and
          isinstance(instance._fields, _collections.Sequence) and
          all(isinstance(f, _six.string_types) for f in instance._fields)):
        # This is a namedtuple.
        return type(instance)(*args)
    else:
        # Not a namedtuple.
        return type(instance)(args)
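A quick standalone check of the dict branch, with an invented OrderedDict: the keys are sorted to match the order in which the values were extracted, then the original container type, and for OrderedDict the original key order, is restored:

from collections import OrderedDict

inst = OrderedDict([('b', 0), ('a', 0)])
args = [1, 2]  # values assumed to arrive sorted by key, i.e. for ['a', 'b']
result = dict(zip(sorted(inst), args))              # {'a': 1, 'b': 2}
rebuilt = type(inst)((k, result[k]) for k in inst)
assert list(rebuilt.items()) == [('b', 2), ('a', 1)]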
Example 7: assigned_res_type
def assigned_res_type(inst, attr, value):
    """
    Assert that only one (or no) assigned resource type is defined in the
    RAML root and correctly represented in the RAML.
    """
    if value:
        if isinstance(value, tuple([dict, list])) and len(value) > 1:
            msg = "Too many resource types applied to '{0}'.".format(
                inst.display_name
            )
            raise InvalidResourceNodeError(msg)

        res_types = inst.root.raw.get("resourceTypes", {})
        res_type_names = [list(iterkeys(i))[0] for i in res_types]
        if isinstance(value, list):
            item = value[0]  # NOCOV
        elif isinstance(value, dict):
            item = list(iterkeys(value))[0]  # NOCOV
        else:
            item = value
        if item not in res_type_names:
            msg = ("Resource Type '{0}' is assigned to '{1}' but is not "
                   "defined in the root of the API.".format(value,
                                                            inst.display_name))
            raise InvalidResourceNodeError(msg)
Example 8: log_prob
def log_prob(self, xs, zs):
    """
    Parameters
    ----------
    xs : dict of str to tf.Tensor
        Data dictionary. Each key is a data structure used in the
        model (Theano shared variable), and its value is the
        corresponding realization (tf.Tensor).
    zs : dict of str to tf.Tensor
        Latent variable dictionary. Each key names a latent variable
        used in the model (str), and its value is the corresponding
        realization (tf.Tensor).

    Returns
    -------
    tf.Tensor
        Scalar, the log joint density log p(xs, zs).

    Notes
    -----
    It wraps around a Python function. The Python function takes
    inputs of type np.ndarray and outputs a np.ndarray.
    """
    # Store keys so that ``_py_log_prob_args`` knows how each
    # value corresponds to a key.
    self.xs_keys = list(six.iterkeys(xs))
    self.zs_keys = list(six.iterkeys(zs))

    # Pass in all tensors as a flattened list for tf.py_func().
    inputs = [tf.convert_to_tensor(x) for x in six.itervalues(xs)]
    inputs += [tf.convert_to_tensor(z) for z in six.itervalues(zs)]

    return tf.py_func(self._py_log_prob_args, inputs, [tf.float32])[0]
Example 9: setUp
def setUp(self):
    super(TestOVNGatewayScheduler, self).setUp()

    # Overwritten by derived classes
    self.l3_scheduler = None

    # Used for unit tests
    self.new_router_name = 'router_new'
    self.fake_chassis_router_mappings = {
        'None': {'Chassis': [],
                 'Routers': {'r1': ovn_const.OVN_GATEWAY_INVALID_CHASSIS}},
        'Multiple1': {'Chassis': ['hv1', 'hv2'],
                      'Routers': {'r1': 'hv1', 'r2': 'hv2', 'r3': 'hv1'}},
        'Multiple2': {'Chassis': ['hv1', 'hv2', 'hv3'],
                      'Routers': {'r1': 'hv1', 'r2': 'hv1', 'r3': 'hv1'}},
        'Multiple3': {'Chassis': ['hv1', 'hv2', 'hv3'],
                      'Routers': {'r1': 'hv3', 'r2': 'hv2', 'r3': 'hv2'}}
    }

    # Determine the chassis-to-router-list bindings
    for details in six.itervalues(self.fake_chassis_router_mappings):
        self.assertNotIn(self.new_router_name,
                         six.iterkeys(details['Routers']))
        details.setdefault('Chassis_Bindings', {})
        for chassis in details['Chassis']:
            details['Chassis_Bindings'].setdefault(chassis, [])
        for router, chassis in six.iteritems(details['Routers']):
            if chassis in six.iterkeys(details['Chassis_Bindings']):
                details['Chassis_Bindings'][chassis].append(router)
Example 10: test_nonall_item_key_value_lists
def test_nonall_item_key_value_lists(self):
    for init in self.inits:
        dic = odict(init.items())
        omd = omdict(init.items())

        # Testing items(), keys(), values(), lists(), and listitems().
        assert omd.items() == list(dic.items())
        assert omd.keys() == list(dic.keys())
        assert omd.values() == list(dic.values())
        iterator = zip(omd.keys(), omd.lists(), omd.listitems())
        for key, valuelist, listitem in iterator:
            assert omd.values(key) == omd.getlist(key) == valuelist
            assert omd.items(key) == [i for i in init.items() if i[0] == key]
            assert listitem == (key, valuelist)

        # Testing iteritems(), iterkeys(), itervalues(), and iterlists().
        for key1, key2 in zip(omd.iterkeys(), six.iterkeys(dic)):
            assert key1 == key2
        for val1, val2 in zip(omd.itervalues(), six.itervalues(dic)):
            assert val1 == val2
        for item1, item2 in zip(omd.iteritems(), six.iteritems(dic)):
            assert item1 == item2
        for key, values in zip(six.iterkeys(omd), omd.iterlists()):
            assert omd.getlist(key) == values
        iterator = zip(omd.iterkeys(), omd.iterlists(), omd.iterlistitems())
        for key, valuelist, listitem in iterator:
            assert listitem == (key, valuelist)

        # Test iteritems() and itervalues() with a key.
        for key in omd.iterkeys():
            assert list(omd.iteritems(key)) == list(
                zip(repeat(key), omd.getlist(key)))
            assert list(omd.iterallitems(key)) == list(
                zip(repeat(key), omd.getlist(key)))
        for nonkey in self.nonkeys:
            self.assertRaises(KeyError, omd.iteritems, nonkey)
            self.assertRaises(KeyError, omd.itervalues, nonkey)
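The omd object above comes from the orderedmultidict package. Assuming its published API (getlist and allitems), a tiny refresher on the multi-value semantics the assertions rely on:

from orderedmultidict import omdict

omd = omdict([('a', 1), ('b', 2), ('a', 11)])
assert omd.getlist('a') == [1, 11]                         # all values for one key
assert omd.allitems() == [('a', 1), ('b', 2), ('a', 11)]   # duplicate keys kept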
Example 11: _compare_odict_and_omddict
def _compare_odict_and_omddict(self, d, omd):
    assert len(d) == len(omd)  # __len__().

    # __contains__(), has_key(), get(), and setdefault().
    for dkey, omdkey in zip(d, omd):
        assert dkey == omdkey and dkey in d and omdkey in omd
        assert dkey in d and omdkey in omd
        assert d.get(dkey) == omd.get(omdkey)
        d.setdefault(dkey, _unique)
        omd.setdefault(omdkey, _unique)
        assert d.get(dkey) == omd.get(omdkey) and d.get(dkey) != _unique
    for nonkey in self.nonkeys:
        assert d.get(nonkey) == omd.get(nonkey) is None
        d.setdefault(nonkey, _unique)
        omd.setdefault(nonkey, _unique)
        assert d.get(nonkey) == omd.get(nonkey) == _unique

    # items(), keys(), values(), iteritems(), iterkeys(), and itervalues().
    iterators = [
        zip(d.items(), omd.items(), d.keys(), omd.keys(),
            d.values(), omd.values()),
        zip(
            six.iteritems(d),
            six.iteritems(omd),
            six.iterkeys(d),
            six.iterkeys(omd),
            six.itervalues(d),
            six.itervalues(omd),
        ),
    ]
    for iterator in iterators:
        for ditem, omditem, dkey, omdkey, dvalue, omdvalue in iterator:
            assert dkey == omdkey
            assert ditem == omditem
            assert dvalue == omdvalue

    # pop().
    dcopy, omdcopy = d.copy(), omd.copy()
    while dcopy and omdcopy:
        dpop = dcopy.pop(list(dcopy.keys())[0])
        omdpop = omdcopy.pop(list(omdcopy.keys())[0])
        assert dpop == omdpop
    # popitem().
    dcopy, omdcopy = d.copy(), omd.copy()
    while dcopy and omdcopy:
        assert dcopy.popitem() == omdcopy.popitem()

    # __getitem__().
    for dkey, omdkey in zip(six.iterkeys(d), six.iterkeys(omd)):
        assert d[dkey] == omd[omdkey]
    # __setitem__().
    for dkey, omdkey in zip(d, omd):
        d[dkey] = _unique
        omd[omdkey] = _unique
        assert dkey == omdkey and d[dkey] == omd[omdkey]
    # __delitem__().
    while d and omd:
        dkey, omdkey = list(d.keys())[0], list(omd.keys())[0]
        del d[dkey]
        del omd[omdkey]
        assert dkey == omdkey and dkey not in d and omdkey not in omd
Example 12: compute_edge_map
def compute_edge_map(mesh0, mesh1):
    """
    Compute map from edges of mesh0 to edges of mesh1.

    *Arguments*
        mesh0
            a :py:class:`Mesh <dolfin.cpp.Mesh>`.
        mesh1
            a :py:class:`Mesh <dolfin.cpp.Mesh>`.

    It is assumed that both meshes have a :py:class:`MeshFunction
    <dolfin.cpp.MeshFunction>` over the vertices named
    "parent_vertex_indices" which contains a mapping from the
    local vertices to a common parent vertex numbering.
    """
    # Check arguments
    if not isinstance(mesh0, Mesh):
        raise TypeError("expected 'Mesh' as argument")
    if not isinstance(mesh1, Mesh):
        raise TypeError("expected 'Mesh' as argument")

    # Get parent vertex numbers
    vertices0 = mesh0.data().array("parent_vertex_indices", 0)
    vertices1 = mesh1.data().array("parent_vertex_indices", 0)

    # Check mappings
    if len(vertices0) == 0 or len(vertices1) == 0:
        cpp.dolfin_error("ale.py",
                         "compute edge map",
                         "Parent vertex indices are missing")

    # Initialize edges
    mesh0.init(1)
    mesh1.init(1)

    # Build parent-to-local map from vertex pair to local edge for mesh0
    parent_to_local_mesh0 = {}
    for edge in edges(mesh0):
        v = [vertices0[int(i)] for i in edge.entities(0)]
        v.sort()
        parent_to_local_mesh0[tuple(v)] = edge.index()

    # Build parent-to-local map from vertex pair to local edge for mesh1
    parent_to_local_mesh1 = {}
    for edge in edges(mesh1):
        v = [vertices1[int(i)] for i in edge.entities(0)]
        v.sort()
        parent_to_local_mesh1[tuple(v)] = edge.index()

    # Get common edges
    common_edges = set(six.iterkeys(parent_to_local_mesh0)).intersection(
        set(six.iterkeys(parent_to_local_mesh1)))

    # Compute map
    edge_map = {}
    for edge in common_edges:
        edge_map[parent_to_local_mesh0[edge]] = parent_to_local_mesh1[edge]

    return edge_map
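The common-edges step is just a key-set intersection followed by index translation; a minimal standalone sketch with made-up parent-to-local maps:

map0 = {(0, 1): 5, (1, 2): 7}   # parent vertex pair -> local edge in mesh0
map1 = {(1, 2): 3, (2, 4): 9}   # parent vertex pair -> local edge in mesh1
common = set(map0) & set(map1)  # shared parent edges: {(1, 2)}
edge_map = {map0[e]: map1[e] for e in common}
assert edge_map == {7: 3}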
Example 13: _update_list_with_key
def _update_list_with_key(old_list, new_list, key,
                          preserve_old=False, update_value_fn=None):
    """Update a SQLAlchemy list-relationship, using key for identity.

    Make old_list look like new_list, in a similar way to _update_dict, as
    if the list were a dictionary with keys computed using the key function.

    If preserve_old is true, elements in old_list with a key not present in
    new_list will be preserved.
    """
    if update_value_fn is None:
        update_value_fn = _update_object

    old_dict = dict((key(v), v) for v in old_list)
    new_dict = dict((key(v), v) for v in new_list)

    for k in set(iterkeys(old_dict)) | set(iterkeys(new_dict)):
        if k in new_dict:
            if k not in old_dict:
                # Move the new value from new_list to old_list.
                temp = new_dict[k]
                new_list.remove(temp)
                old_list.append(temp)
            else:
                # Update the old value with the new value.
                update_value_fn(old_dict[k], new_dict[k])
        elif not preserve_old:
            # Remove the old value that is no longer present.
            old_list.remove(old_dict[k])
Example 14: _update_dict
def _update_dict(old_dict, new_dict, update_value_fn=None):
    """Update a SQLAlchemy relationship with type dict.

    Make old_dict look like new_dict, by:
    - calling update_value_fn to overwrite the values of old_dict with the
      corresponding values in new_dict;
    - deleting all entries in old_dict whose key is not in new_dict;
    - moving all entries in new_dict whose key is not in old_dict.
    """
    if update_value_fn is None:
        update_value_fn = _update_object

    for key in set(iterkeys(old_dict)) | set(iterkeys(new_dict)):
        if key in new_dict:
            if key not in old_dict:
                # Move the object from new_dict to old_dict; copying the
                # value instead would trigger some funny behavior of
                # SQLAlchemy-instrumented collections, resulting in new
                # objects being added to the session.
                temp = new_dict[key]
                del new_dict[key]
                old_dict[key] = temp
            else:
                # Update the old value with the new value.
                update_value_fn(old_dict[key], new_dict[key])
        else:
            # Delete the old value if there is no new value for that key.
            del old_dict[key]
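Stripped of SQLAlchemy, the union-over-keys pattern shared by examples 13 and 14 is a three-way merge. A minimal sketch with plain dicts; merge_into and its default update function are hypothetical stand-ins for _update_dict and _update_object:

def merge_into(old, new, update=lambda o, n: o.update(n)):
    # Iterate over the union of keys, computed up front so mutation is safe.
    for k in set(old) | set(new):
        if k not in new:
            del old[k]                  # key disappeared: drop it
        elif k not in old:
            old[k] = new.pop(k)         # key is new: move the object over
        else:
            update(old[k], new[k])      # key in both: overwrite in place

old = {'x': {'v': 1}, 'y': {'v': 2}}
new = {'y': {'v': 20}, 'z': {'v': 30}}
merge_into(old, new)
assert old == {'y': {'v': 20}, 'z': {'v': 30}}
assert 'z' not in new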
Example 15: test_rm
def test_rm(tmpdir):
    tmpdir = six.text_type(tmpdir)

    shutil.copytree(
        join(dirname(__file__), 'example_results'),
        join(tmpdir, 'example_results'))

    conf = config.Config.from_json({
        'results_dir': join(tmpdir, 'example_results'),
        'repo': "### IGNORED, BUT REQUIRED ###"
    })

    tools.run_asv_with_conf(conf, 'rm', '-y', 'benchmark=time_quantity*')

    results_a = list(results.iter_results(tmpdir))
    for result in results_a:
        for key in six.iterkeys(result.results):
            assert not key.startswith('time_quantity')
        for key in six.iterkeys(result.started_at):
            assert not key.startswith('time_quantity')
        for key in six.iterkeys(result.ended_at):
            assert not key.startswith('time_quantity')

    tools.run_asv_with_conf(conf, 'rm', '-y', 'commit_hash=05d283b9')

    results_b = list(results.iter_results(tmpdir))
    assert len(results_b) == len(results_a) - 1